/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "mlx5_core.h"
#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
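/* Worked example (illustrative, not in the original header): with the default
 * Ethernet hard_mtu above, a software MTU of 1500 maps to a hardware MTU of
 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) = 1522, and
 * MLX5E_HW2SW_MTU() performs the inverse conversion.
 */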
#define MLX5E_MAX_NUM_TC 8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
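/* Worked example (illustrative, not in the original header): with 4K pages
 * (PAGE_SHIFT == 12), MLX5_MPWRQ_LOG_WQE_SZ == 18 gives a 256KB WQE, a page
 * order of 18 - 12 = 6, and therefore 64 pages per MPWQE.
 */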
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
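/* Worked example (illustrative, arch-dependent): on a typical 4K-page x86_64
 * build, MLX5E_MIN_SKB_FRAG_SZ is SKB_DATA_ALIGN(NET_SKB_PAD) plus the aligned
 * size of struct skb_shared_info, i.e. a few hundred bytes; rounded up to 512
 * this yields ilog2(4096 / 512) == 3, i.e. at most 8 RX WQEs per bulk.
 */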
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)
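/* Example (illustrative): mlx5e_dbg(HW, priv, "MTU changed to %d\n", mtu)
 * expands against NETIF_MSG_HW and only prints when that bit is set in
 * priv->msglevel.
 */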
enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}
/* Use this function to get the max num channels (rxqs/txqs) only when creating
 * the netdev.
 */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	union {
		struct {
			struct mlx5_wqe_eth_seg eth;
			struct mlx5_wqe_data_seg data[0];
		};
		u8 tls_progress_params_ctx[0];
	};
};
struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg data[0];
};
struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg mkc;
	union {
		struct mlx5_mtt inline_mtts[0];
		u8 tls_static_params_ctx[0];
	};
};
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_NUM_PFLAGS, /* Keep last */
};
#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
struct mlx5e_params {
	u8 log_sq_size;
	u8 rq_wq_type;
	u8 log_rq_mtu_frames;
	u16 num_channels;
	u8 num_tc;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u8 tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu;
};
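/* Illustrative sketch (assumption, not part of the original header): toggling
 * and reading back a private flag with the helpers above.
 */
static inline bool mlx5e_example_toggle_cqe_compress(struct mlx5e_params *params,
						     bool enable)
{
	/* Set or clear the flag bit, then report its current state. */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
	return MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS);
}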
enum mlx5e_rq_state {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
};
struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	u16 event_ctr;
	struct napi_struct *napi;
	struct mlx5_core_cq mcq;
	struct mlx5e_channel *channel;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64 title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8 mini_arr_idx;
	u16 left;
	u16 wqe_counter;
} ____cacheline_aligned_in_smp;
enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t addr;
	u32 size;
	enum mlx5e_dma_map_type type;
};
enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_TLS,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16 cc;
	struct dim dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16 pc ____cacheline_aligned_in_smp;

	struct mlx5e_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	struct mlx5e_sq_stats *stats;
	struct {
		struct mlx5e_sq_dma *dma_fifo;
		struct mlx5e_tx_wqe_info *wqe_info;
	} db;
	void __iomem *uar_map;
	struct netdev_queue *txq;
	unsigned long state;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;

	struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};
/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up things in a proper way.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * freed.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us,
	 * so it has to be recycled.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};
struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;
		struct {
			struct mlx5e_rq *rq;
			struct mlx5e_dma_info di;
		} page;
	};
};
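/* Illustrative sketch (assumption, not part of the original header): a
 * completion handler typically dispatches on the xmit mode recorded above.
 */
static inline bool mlx5e_example_xdpi_needs_dma_unmap(struct mlx5e_xdp_info *xdpi)
{
	switch (xdpi->mode) {
	case MLX5E_XDP_XMIT_MODE_FRAME:
		/* DMA-mapped at xmit time, so unmap and free the frame. */
		return true;
	case MLX5E_XDP_XMIT_MODE_PAGE:	/* recycle the page instead */
	case MLX5E_XDP_XMIT_MODE_XSK:	/* advance the UMEM CQ instead */
	default:
		return false;
	}
}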
struct mlx5e_xdp_xmit_data {
	dma_addr_t dma_addr;
	void *data;
	u32 len;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};
struct mlx5e_xdp_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u8 ds_count;
	u8 pkt_count;
	u8 inline_on;
};
struct mlx5e_xdpsq;

typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xdp_xmit_data *,
					struct mlx5e_xdp_info *,
					int);
struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32 xdpi_fifo_cc;
	u16 cc;

	/* dirtied @xmit */
	u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16 pc;
	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_xdp_mpwqe mpwqe;

	struct mlx5e_cq cq;

	/* read only */
	struct xdp_umem *umem;
	struct mlx5_wq_cyc wq;
	struct mlx5e_xdpsq_stats *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem *uar_map;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
struct mlx5e_icosq {
	/* data path */
	u16 cc;
	u16 pc;

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_cq cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;
	unsigned long state;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;

	struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit is capable of serving one napi call (for a non-striding
 * rq) or one MPWQE (for a striding rq).
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
			  MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
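/* Worked example (illustrative): with 4K pages, MLX5_MPWRQ_PAGES_PER_WQE is 64
 * and NAPI_POLL_WEIGHT is 64, so MLX5E_CACHE_UNIT is 64 and MLX5E_CACHE_SIZE
 * is 4 * 64 = 256 entries.
 */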
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};
struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info *di;
			struct mlx5e_rq_frags_info info;
			mlx5e_fp_skb_from_cqe skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll wq;
			struct mlx5e_umr_wqe umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
		} mpwqe;
	};
	struct {
		u16 headroom;
		u32 frame0_sz;
		u8 map_dir; /* dma map direction */
	} buff;

	struct mlx5e_channel *channel;
	struct net_device *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq cq;
	struct mlx5e_cq_decomp cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes post_wqes;
	mlx5e_fp_dealloc_wqe dealloc_wqe;

	unsigned long state;

	struct dim dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog *xdp_prog;
	struct mlx5e_xdpsq *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool *page_pool;

	/* AF_XDP zero-copy */
	struct xdp_umem *umem;

	struct work_struct recover_work;

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
	u8 wq_type;
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq rq;
	struct mlx5e_xdpsq rq_xdpsq;
	struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq icosq; /* internal control operations */
	bool xdp;
	struct napi_struct napi;
	struct device *pdev;
	struct net_device *netdev;
	__be32 mkey_be;
	u8 num_tc;
	u8 lag_port;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq xskrq;
	struct mlx5e_xdpsq xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t async_icosq_lock;

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats *stats;

	/* control */
	struct mlx5e_priv *priv;
	struct mlx5_core_dev *mdev;
	struct hwtstamp_config *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int ix;
	int cpu;
};
struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int num;
	struct mlx5e_params params;
};
struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};
struct mlx5e_rqt {
	u32 rqtn;
	bool enabled;
};

struct mlx5e_tir {
	u32 tirn;
	struct mlx5e_rqt rqt;
	struct list_head list;
};
struct mlx5e_rss_params {
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8 toeplitz_hash_key[40];
	u8 hfunc;
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
};
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work work;
	u16 delay;
	void *buf;
};
#endif
struct mlx5e_xsk {
	/* UMEMs are stored separately from channels, because we don't want to
	 * lose them when channels are recreated. The kernel also stores UMEMs,
	 * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
	 * so we rely on our own mechanism.
	 */
	struct xdp_umem **umems;
	u16 refcnt;
	bool ever_used;
};
/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used in functions that must not fail and therefore cannot
 * allocate them on demand. Use with care and make sure the same variable is
 * not used simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp dcbx_dp;
#endif
	/* priv data path fields - end */

	u32 msglevel;
	unsigned long state;
	struct mutex state_lock; /* Protects Interface state */
	struct mlx5e_rq drop_rq;

	struct mlx5e_channels channels;
	u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params rss_params;
	u32 tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct *wq;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	struct work_struct tx_timeout_work;
	struct work_struct update_stats_work;
	struct work_struct monitor_counters_work;
	struct mlx5_nb monitor_counters_nb;

	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_stats stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	u16 max_nch;
	u8 max_opened_tc;
	struct hwtstamp_config tstamp;
	u16 q_counter;
	u16 drop_rq_q_counter;
	struct notifier_block events_nb;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx dcbx;
#endif

	const struct mlx5e_profile *profile;
	void *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port dl_port;
	struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad scratchpad;
};
struct mlx5e_profile {
	int (*init)(struct mlx5_core_dev *mdev,
		    struct net_device *netdev,
		    const struct mlx5e_profile *profile, void *ppriv);
	void (*cleanup)(struct mlx5e_priv *priv);
	int (*init_rx)(struct mlx5e_priv *priv);
	void (*cleanup_rx)(struct mlx5e_priv *priv);
	int (*init_tx)(struct mlx5e_priv *priv);
	void (*cleanup_tx)(struct mlx5e_priv *priv);
	void (*enable)(struct mlx5e_priv *priv);
	void (*disable)(struct mlx5e_priv *priv);
	int (*update_rx)(struct mlx5e_priv *priv);
	void (*update_stats)(struct mlx5e_priv *priv);
	void (*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	int max_tc;
	u8 rq_groups;
};
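/* Illustrative sketch (assumption, not part of the original header): profiles
 * are normally provided as static const instances with designated
 * initializers, e.g.
 *
 *	static const struct mlx5e_profile mlx5e_example_profile = {
 *		.init      = mlx5e_example_init,
 *		.cleanup   = mlx5e_example_cleanup,
 *		.update_rx = mlx5e_update_nic_rx,
 *		.max_tc    = MLX5E_MAX_NUM_TC,
 *		.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
 *	};
 */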
void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			  struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};
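/* Illustrative sketch (assumption, not part of the original header):
 * redirecting an RQT of size 1 to a single direct RQN:
 *
 *	struct mlx5e_redirect_rqt_param rrp = {
 *		.is_rss = false,
 *		.rqn    = rqn,
 *	};
 *	mlx5e_redirect_rqt(priv, rqtn, 1, rrp);
 */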
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels.
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ \
		return fn(priv); \
	}
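/* Illustrative sketch (assumption, not part of the original header): a routine
 * taking only a priv argument can be adapted to the preactivate signature:
 *
 *	static int mlx5e_example_apply(struct mlx5e_priv *priv);
 *	MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_example_apply);
 *
 * The generated mlx5e_example_apply_ctx() can then be passed to
 * mlx5e_safe_switch_channels() as its preactivate callback.
 */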
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
	       MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}
extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
		     u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */