/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
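/* Example: with the default hard_mtu of MLX5E_ETH_HARD_MTU =
 * 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) = 22 bytes, a software
 * MTU of 1500 maps to MLX5E_SW2HW_MTU(params, 1500) == 1522, and
 * MLX5E_HW2SW_MTU() performs the inverse conversion.
 */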
#define MLX5E_MAX_NUM_TC	8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
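/* Example (sizes are arch- and config-dependent): on a 64-bit build with
 * 64-byte cache lines, MLX5_SKB_FRAG_SZ(1522) rounds the length up to 1536
 * and adds the aligned footprint of struct skb_shared_info, giving the
 * true per-fragment memory cost used in the RX WQE bulk math below.
 */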
#define MLX5E_RX_MAX_HEAD (256)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
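/* Example: on a device with 64-byte cache lines, cache_line_128byte == 0,
 * so the minimum log stride size is 6 (64 B); the default requests
 * order_base_2(MLX5E_RX_MAX_HEAD) == 8, so MLX5_MPWRQ_DEF_LOG_STRIDE_SZ()
 * evaluates to 8, i.e. 256-byte strides.
 */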
#define MLX5_MPWRQ_LOG_WQE_SZ		18
#define MLX5_MPWRQ_WQE_PAGE_ORDER	(MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
					 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs. This page will absorb write overflow by the hardware when
 * receiving packets larger than MTU. These oversize packets are
 * dropped by the driver at a later stage.
 */
#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
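/* Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): a 2^18-byte WQE
 * spans BIT(18 - 12) == 64 pages, so MLX5E_REQUIRED_WQE_MTTS ==
 * ALIGN(64 + 1, 8) == 72 and MLX5_MTT_OCTW(72) == 36 octwords. With
 * MLX5E_MAX_RQ_NUM_MTTS == 2^17, ilog2(131072 / 72) == 10 caps the MPWQE
 * RQ at a log size of 10, i.e. 1024 WQEs.
 */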
#define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK	\
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
						MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_TX_XSK_POLL_BUDGET       64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
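/* Sizing sketch, assuming 4 KiB pages: the inline MTT array holds 64
 * entries of 8 bytes each (512 B after alignment to MLX5_UMR_MTT_ALIGNMENT)
 * on top of the fixed ctrl/umr-ctrl/mkey segments, and MLX5E_UMR_WQEBBS
 * divides the total by the 64-byte MLX5_SEND_WQE_BB to get how many basic
 * blocks one UMR post consumes.
 */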
#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)
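/* Example: mlx5e_dbg(LINK, priv, "link up\n") expands the level token to
 * NETIF_MSG_LINK and prints only when that bit is set in priv->msglevel,
 * so per-category debug output can be toggled at runtime via
 * ethtool -s <dev> msglvl.
 */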
enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
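/* Example: since the enumerators are zero-based and in order,
 * MLX5E_NUM_RQ_GROUPS(XSK) == 1 + MLX5E_RQ_GROUP_XSK == 2, i.e. the number
 * of groups up to and including the named one.
 */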
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg      data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_mtt                inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_PFLAG_SKB_TX_MPWQE,
	MLX5E_PFLAG_TX_PORT_TS,
	MLX5E_NUM_PFLAGS, /* Keep last */
};
#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
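/* Usage example; the do/while wrapper makes the setter a single statement,
 * so it composes safely with unbraced if/else:
 *
 *	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		enable_cqe_compression();	// hypothetical helper
 */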
struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u8  log_rq_mtu_frames;
	u16 num_channels;
	u8  num_tc;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u8  tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu;
};

enum mlx5e_rq_state {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
};

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_ch_stats     *ch_stats;

	/* control */
	struct net_device         *netdev;
	struct mlx5_core_dev      *mdev;
	struct mlx5e_priv         *priv;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        left;
	u16                        wqe_counter;
} ____cacheline_aligned_in_smp;

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t                addr;
	u32                       size;
	enum mlx5e_dma_map_type   type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_MPWQE,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_TLS,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_tx_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u32 bytes_count;
	u8  ds_count;
	u8  pkt_count;
	u8  inline_on;
};

struct mlx5e_skb_fifo {
	struct sk_buff **fifo;
	u16 *pc;
	u16 *cc;
	u16 mask;
};

struct mlx5e_ptpsq;

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u16                        skb_fifo_cc;
	u32                        dma_fifo_cc;
	struct dim                 dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u16                        skb_fifo_pc;
	u32                        dma_fifo_pc;
	struct mlx5e_tx_mpwqe      mpwqe;

	struct mlx5e_cq            cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	struct mlx5e_sq_stats     *stats;
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_skb_fifo      skb_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u16                        stop_room;
	u8                         min_inline_mode;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;
	unsigned int               hw_mtu;
	struct hwtstamp_config    *tstamp;
	struct mlx5_clock         *clock;
	struct net_device         *netdev;
	struct mlx5_core_dev      *mdev;
	struct mlx5e_priv         *priv;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	int                        ch_ix;
	int                        txq_ix;
	u32                        rate_limit;
	struct work_struct         recover_work;
	struct mlx5e_ptpsq        *ptpsq;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};
/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them so that each one can be cleaned up properly.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * freed.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};

struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;
		struct {
			struct mlx5e_rq *rq;
			struct mlx5e_dma_info di;
		} page;
	};
};
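/* Simplified completion-path sketch (the real logic lives in en/xdp.c):
 *
 *	switch (xdpi.mode) {
 *	case MLX5E_XDP_XMIT_MODE_FRAME:	// unmap and free the xdp_frame
 *		dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
 *				 xdpi.frame.xdpf->len, DMA_TO_DEVICE);
 *		xdp_return_frame(xdpi.frame.xdpf);
 *		break;
 *	case MLX5E_XDP_XMIT_MODE_PAGE:	// page belongs to our RQ's pool
 *		mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
 *		break;
 *	case MLX5E_XDP_XMIT_MODE_XSK:	// just count it for the CQ ring
 *		xsk_frames++;
 *		break;
 *	}
 */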
struct mlx5e_xmit_data {
	dma_addr_t  dma_addr;
	void       *data;
	u32         len;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};

struct mlx5e_xdpsq;

typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xmit_data *,
					struct mlx5e_xdp_info *,
					int);

struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32                        xdpi_fifo_cc;
	u16                        cc;

	/* dirtied @xmit */
	u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16                        pc;
	struct mlx5_wqe_ctrl_seg   *doorbell_cseg;
	struct mlx5e_tx_mpwqe      mpwqe;

	struct mlx5e_cq            cq;

	/* read only */
	struct xsk_buff_pool      *xsk_pool;
	struct mlx5_wq_cyc         wq;
	struct mlx5e_xdpsq_stats  *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u8                         min_inline_mode;
	unsigned long              state;
	unsigned int               hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
	/* data path */
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;

	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit can serve one napi call (for non-striding rq) or one
 * MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
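/* Worked numbers, assuming 4 KiB pages and NAPI_POLL_WEIGHT == 64:
 * MLX5_MPWRQ_PAGES_PER_WQE == 64, so MLX5E_CACHE_UNIT == 64 and
 * MLX5E_CACHE_SIZE == 4 * 64 == 256 cached pages per RQ.
 */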
struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};
struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};

struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc          wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info      *di;
			struct mlx5e_rq_frags_info  info;
			mlx5e_fp_skb_from_cqe       skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll      wq;
			struct mlx5e_umr_wqe   umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16                    num_strides;
			u16                    actual_wq_head;
			u8                     log_stride_sz;
			u8                     umr_in_progress;
			u8                     umr_last_closed;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u32            frame0_sz;
		u8             map_dir; /* dma map direction */
	} buff;

	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq        cq;
	struct mlx5e_cq_decomp cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;
	struct mlx5e_icosq    *icosq;
	struct mlx5e_priv     *priv;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;
	int                    ix;
	unsigned int           hw_mtu;

	struct dim             dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog __rcu *xdp_prog;
	struct mlx5e_xdpsq    *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool      *page_pool;

	/* AF_XDP zero-copy */
	struct xsk_buff_pool  *xsk_pool;

	struct work_struct     recover_work;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;
	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;
	struct mlx5e_dma_info  wqe_overflow;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_xdpsq         rq_xdpsq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq; /* internal control operations */
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;
	u8                         lag_port;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq         xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq            xskrq;
	struct mlx5e_xdpsq         xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq         async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t                 async_icosq_lock;

	/* data path - accessed per napi poll */
	const struct cpumask      *aff_mask;
	struct mlx5e_ch_stats     *stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int                        ix;
	int                        cpu;
};

struct mlx5e_port_ptp;

struct mlx5e_channels {
	struct mlx5e_channel **c;
	struct mlx5e_port_ptp  *port_ptp;
	unsigned int           num;
	struct mlx5e_params    params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

struct mlx5e_port_ptp_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
} ____cacheline_aligned_in_smp;
enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};

struct mlx5e_rqt {
	u32              rqtn;
	bool             enabled;
};

struct mlx5e_tir {
	u32              tirn;
	struct mlx5e_rqt rqt;
	struct list_head list;
};

struct mlx5e_rss_params {
	u32	indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32	rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8	toeplitz_hash_key[40];
	u8	hfunc;
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
};

#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work        work;
	u16                        delay;
	void                      *buf;
};
#endif

struct mlx5e_xsk {
	/* XSK buffer pools are stored separately from channels,
	 * because we don't want to lose them when channels are
	 * recreated. The kernel also stores buffer pools, but it doesn't
	 * distinguish between zero-copy and non-zero-copy UMEMs, so we
	 * rely on our own mechanism.
	 */
	struct xsk_buff_pool **pools;
	u16 refcnt;
	bool ever_used;
};

/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because those functions
 * must not fail. Use with care and make sure the same variable is not used
 * simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};
struct mlx5e_priv {
	/* priv data path fields - start */
	/* +1 for port ptp ts */
	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir           xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params    rss_params;
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct   *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct work_struct         update_stats_work;
	struct work_struct         monitor_counters_work;
	struct mlx5_nb             monitor_counters_nb;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_port_ptp_stats port_ptp_stats;
	u16                        max_nch;
	u8                         max_opened_tc;
	bool                       port_ptp_opened;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls          *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port        dl_port;
	struct mlx5e_xsk           xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad    scratchpad;
};
struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;

struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	int	(*update_rx)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int	max_tc;
	u8	rq_groups;
};
void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
struct mlx5e_create_cq_param {
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node;
	int ix;
};

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels.
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
int fn##_ctx(struct mlx5e_priv *priv, void *context) \
{ \
	return fn(priv); \
}
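/* Example: MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu)
 * emits mlx5e_set_dev_port_mtu_ctx(), a wrapper that discards the unused
 * context argument so that a plain per-priv function can be passed wherever
 * a mlx5e_fp_preactivate callback is expected (e.g. to
 * mlx5e_safe_switch_channels() below).
 */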
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
	       MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}
extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
		     u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */