/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "mlx5_core.h"
#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
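/*
 * Worked example (a sketch, assuming params->hard_mtu was set to
 * MLX5E_ETH_HARD_MTU = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN = 14 + 4 + 4 = 22):
 *
 *	hw_mtu = MLX5E_SW2HW_MTU(params, 1500);		// 1522
 *	sw_mtu = MLX5E_HW2SW_MTU(params, hw_mtu);	// back to 1500
 */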
#define MLX5E_MAX_NUM_TC	8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_LOG_WQE_SZ		18
#define MLX5_MPWRQ_WQE_PAGE_ORDER	(MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
					 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
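/*
 * Worked example of the MPWQE geometry above (a sketch assuming 4 KiB pages,
 * i.e. PAGE_SHIFT == 12, and 64-byte cache lines, so the minimum log stride
 * size evaluates to 6):
 *
 *	MLX5_MPWRQ_WQE_PAGE_ORDER  = 18 - 12 = 6
 *	MLX5_MPWRQ_PAGES_PER_WQE   = BIT(6) = 64 pages (256 KB per WQE)
 *	MLX5E_REQUIRED_WQE_MTTS    = ALIGN(64, 8) = 64 MTTs per WQE
 *	MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW = ilog2(131072 / 64) = 11
 */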
#define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK	\
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE	0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE	0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE	0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE	0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW	0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ		(64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT		32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE		4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC		0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE	0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS		0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC		0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE	0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS		0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES			0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW			0x2

#define MLX5E_LOG_INDIR_RQT_SIZE	0x7
#define MLX5E_INDIR_RQT_SIZE		BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS		0x1
#define MLX5E_MAX_NUM_CHANNELS		MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS		(MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
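/*
 * Quick sanity check on the sizing above: the indirection table has
 * BIT(0x7) = 128 entries, so at most 128 channels, and with up to 8 TCs
 * per channel at most 128 * 8 = 1024 regular SQs.
 */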
#define MLX5E_TX_CQ_POLL_BUDGET		128
#define MLX5E_TX_XSK_POLL_BUDGET	64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL	500 /* msecs */

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_MSG_LEVEL	NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)		\
do {							\
	if (NETIF_MSG_##mlevel & (priv)->msglevel)	\
		netdev_warn(priv->netdev, format,	\
			    ##__VA_ARGS__);		\
} while (0)
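/*
 * Usage sketch: the first argument is a NETIF_MSG_* suffix, so a
 * (hypothetical) call such as
 *
 *	mlx5e_dbg(HW, priv, "reset requested\n");
 *
 * only prints when NETIF_MSG_HW is set in priv->msglevel.
 */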
enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
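/*
 * Expansion sketch: MLX5E_NUM_RQ_GROUPS(REGULAR) token-pastes to
 * (1 + MLX5E_RQ_GROUP_REGULAR) == 1, while MLX5E_NUM_RQ_GROUPS(XSK) == 2
 * also counts the XSK group.
 */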
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return MLX5_MAX_PORTS;
	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW, wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES, wq_size / 2);
	}
}
/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg     data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg     ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg         mkc;
	struct mlx5_mtt              inline_mtts[0];
};
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_NUM_PFLAGS, /* Keep last */
};
#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
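/*
 * Usage sketch (hypothetical call site): priv flags are kept as one bit per
 * enum mlx5e_priv_flag entry in params->pflags, e.g.
 *
 *	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		enable_cqe_compression();
 */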
struct mlx5e_params {
	u8 log_rq_mtu_frames;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	u8 tx_min_inline_mode;
	bool vlan_strip_disable;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
};
enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
};
struct mlx5e_cq {
	/* data path - accessed per cqe */

	/* data path - accessed per napi poll */
	struct napi_struct   *napi;
	struct mlx5_core_cq   mcq;
	struct mlx5e_channel *channel;

	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl   wq_ctrl;
} ____cacheline_aligned_in_smp;
struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64     title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
} ____cacheline_aligned_in_smp;
enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
};

struct mlx5e_sq_dma {
	enum mlx5e_dma_map_type type;
};
enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_txqsq {
	/* dirtied @completion */
	struct dim dim; /* Adaptive Moderation */

	u16 pc ____cacheline_aligned_in_smp;

	struct mlx5_wq_cyc        wq;
	struct mlx5e_sq_stats    *stats;
	struct mlx5e_sq_dma      *dma_fifo;
	struct mlx5e_tx_wqe_info *wqe_info;
	void __iomem             *uar_map;
	struct netdev_queue      *txq;
	struct hwtstamp_config   *tstamp;
	struct mlx5_clock        *clock;

	struct mlx5_wq_ctrl       wq_ctrl;
	struct mlx5e_channel     *channel;
	struct work_struct        recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
	struct xdp_buff *xsk;
};
/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up things in a proper way.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * freed.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};
struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	struct xdp_frame *xdpf;
	struct mlx5e_dma_info di;
};
struct mlx5e_xdp_xmit_data {
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
};

struct mlx5e_xdp_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
};
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xdp_xmit_data *,
					struct mlx5e_xdp_info *,
					int);
struct mlx5e_xdpsq {
	/* dirtied @completion */

	u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_xdp_mpwqe    mpwqe;

	struct xdp_umem          *umem;
	struct mlx5_wq_cyc        wq;
	struct mlx5e_xdpsq_stats *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame   xmit_xdp_frame;
	struct mlx5e_xdp_wqe_info *wqe_info;
	struct mlx5e_xdp_info_fifo xdpi_fifo;
	void __iomem             *uar_map;

	struct mlx5_wq_ctrl       wq_ctrl;
	struct mlx5e_channel     *channel;
} ____cacheline_aligned_in_smp;
struct mlx5e_icosq {
	struct mlx5_wqe_ctrl_seg *doorbell_cseg;

	/* write@xmit, read@completion */
	struct mlx5e_icosq_wqe_info *wqe_info;

	struct mlx5_wq_cyc wq;
	void __iomem      *uar_map;

	struct mlx5_wq_ctrl   wq_ctrl;
	struct mlx5e_channel *channel;

	struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
#define MLX5E_MAX_RX_FRAGS 4

/* a single cache unit is capable of serving one napi call (for non-striding rq)
 * or an MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
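/*
 * Sizing sketch (assuming 4 KiB pages, so 64 pages per MPWQE, and the
 * kernel's NAPI_POLL_WEIGHT of 64): MLX5E_CACHE_UNIT = 64 and
 * MLX5E_CACHE_SIZE = 4 * roundup_pow_of_two(64) = 256 cached pages.
 */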
struct mlx5e_page_cache {
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
enum {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};
struct mlx5e_rq_frag_info {
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
};
struct mlx5e_rq {
	struct mlx5_wq_cyc          wq;
	struct mlx5e_wqe_frag_info *frags;
	struct mlx5e_dma_info      *di;
	struct mlx5e_rq_frags_info  info;
	mlx5e_fp_skb_from_cqe       skb_from_cqe;

	struct mlx5_wq_ll      wq;
	struct mlx5e_umr_wqe   umr_wqe;
	struct mlx5e_mpw_info *info;
	mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;

	u8 map_dir; /* dma map direction */

	struct mlx5e_channel    *channel;
	struct net_device       *netdev;
	struct mlx5e_rq_stats   *stats;
	struct mlx5e_cq_decomp   cqd;
	struct mlx5e_page_cache  page_cache;
	struct hwtstamp_config  *tstamp;
	struct mlx5_clock       *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	struct dim dim; /* Dynamic Interrupt Moderation */

	struct bpf_prog     *xdp_prog;
	struct mlx5e_xdpsq  *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool    *page_pool;

	/* AF_XDP zero-copy */
	struct xdp_umem *umem;

	struct work_struct recover_work;

	struct mlx5_wq_ctrl wq_ctrl;

	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};
struct mlx5e_channel {
	struct mlx5e_xdpsq rq_xdpsq;
	struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq icosq; /* internal control operations */
	struct napi_struct napi;
	struct net_device *netdev;

	struct mlx5e_xdpsq xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq    xskrq;
	struct mlx5e_xdpsq xsksq;

	struct mlx5e_icosq async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t async_icosq_lock;

	/* data path - accessed per napi poll */
	struct irq_desc      *irq_desc;
	struct mlx5e_ch_stats *stats;

	struct mlx5e_priv      *priv;
	struct mlx5_core_dev   *mdev;
	struct hwtstamp_config *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
};
struct mlx5e_channels {
	struct mlx5e_channel **c;
	struct mlx5e_params    params;
};
struct mlx5e_channel_stats {
	struct mlx5e_ch_stats    ch;
	struct mlx5e_sq_stats    sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats    rq;
	struct mlx5e_rq_stats    xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
enum {
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};
struct mlx5e_tir {
	struct mlx5e_rqt rqt;
	struct list_head list;
};
struct mlx5e_rss_params {
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8  toeplitz_hash_key[40];
};
struct mlx5e_modify_sq_param {
};
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work        work;
};
#endif

struct mlx5e_xsk {
	/* UMEMs are stored separately from channels, because we don't want to
	 * lose them when channels are recreated. The kernel also stores UMEMs,
	 * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
	 * so rely on our mechanism.
	 */
	struct xdp_umem **umems;
};
/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because those functions
 * must not fail. Use with care and make sure the same variable is not used
 * simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp dcbx_dp;
#endif
	/* priv data path fields - end */

	struct mutex state_lock; /* Protects Interface state */
	struct mlx5e_rq drop_rq;

	struct mlx5e_channels channels;
	u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params rss_params;
	u32 tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct *wq;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	struct work_struct tx_timeout_work;
	struct work_struct update_stats_work;
	struct work_struct monitor_counters_work;
	struct mlx5_nb     monitor_counters_nb;

	struct mlx5_core_dev *mdev;
	struct net_device    *netdev;
	struct mlx5e_stats    stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];

	struct hwtstamp_config tstamp;

	u16 drop_rq_q_counter;
	struct notifier_block events_nb;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx dcbx;
#endif

	const struct mlx5e_profile *profile;

#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port dl_port;
	struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad scratchpad;
};
struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
struct mlx5e_profile {
	int (*init)(struct mlx5_core_dev *mdev,
		    struct net_device *netdev,
		    const struct mlx5e_profile *profile, void *ppriv);
	void (*cleanup)(struct mlx5e_priv *priv);
	int (*init_rx)(struct mlx5e_priv *priv);
	void (*cleanup_rx)(struct mlx5e_priv *priv);
	int (*init_tx)(struct mlx5e_priv *priv);
	void (*cleanup_tx)(struct mlx5e_priv *priv);
	void (*enable)(struct mlx5e_priv *priv);
	void (*disable)(struct mlx5e_priv *priv);
	int (*update_rx)(struct mlx5e_priv *priv);
	void (*update_stats)(struct mlx5e_priv *priv);
	void (*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
};
void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
	u32 rqn; /* Direct RQN (Non-RSS) */
		struct mlx5e_channels *channels;
	} rss; /* RSS data */
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels.
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ return fn(priv); }
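/*
 * Usage sketch: for a preactivate callback such as mlx5e_set_dev_port_mtu()
 * declared below, MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu)
 * emits mlx5e_set_dev_port_mtu_ctx(), which drops the unused context argument
 * and can be passed wherever a mlx5e_fp_preactivate is expected.
 */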
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
	       MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}
extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */