/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#ifndef _MLX5_ESWITCH_
#define _MLX5_ESWITCH_

#include <linux/mlx5/driver.h>
#include <net/devlink.h>

#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)

enum {
	MLX5_ESWITCH_NONE,
	MLX5_ESWITCH_LEGACY,
	MLX5_ESWITCH_OFFLOADS
};

enum {
	REP_ETH,
	REP_IB,
	NUM_REP_TYPES,
};

struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_ops {
	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
	void (*unload)(struct mlx5_eswitch_rep *rep);
	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
};

struct mlx5_eswitch_rep_data {
	void *priv;
	atomic_t state;
};

struct mlx5_eswitch_rep {
	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
	u16 vport;
	u16 esw_owner_vhca_id;
	/* Only IB rep is using vport_index */
	u16 vport_index;
	u32 vlan_refcount;
	struct mlx5_eswitch *esw;
};

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 u8 rep_type);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep, u32 sqn);

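/* Usage sketch (illustrative only, not part of this header): a representor
 * consumer fills in a const mlx5_eswitch_rep_ops and registers it for its
 * rep type. All "example_*" names below are hypothetical.
 *
 *	static int example_rep_load(struct mlx5_core_dev *dev,
 *				    struct mlx5_eswitch_rep *rep)
 *	{
 *		rep->rep_data[REP_ETH].priv = example_priv_alloc(dev, rep);
 *		return rep->rep_data[REP_ETH].priv ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_rep_unload(struct mlx5_eswitch_rep *rep)
 *	{
 *		example_priv_free(rep->rep_data[REP_ETH].priv);
 *	}
 *
 *	static void *example_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 *	{
 *		return rep->rep_data[REP_ETH].priv;
 *	}
 *
 *	static const struct mlx5_eswitch_rep_ops example_rep_ops = {
 *		.load = example_rep_load,
 *		.unload = example_rep_unload,
 *		.get_proto_dev = example_rep_get_proto_dev,
 *	};
 *
 *	mlx5_eswitch_register_vport_reps(esw, &example_rep_ops, REP_ETH);
 */
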
#ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);

/* Reg C0 usage:
 * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT_BITS(12) | ESW_REG_C0_OBJ(16) >
 *
 * The highest 4 bits of reg c0 are the PF_NUM (range 0-15), followed by
 * 12 bits of unique non-zero vport id (range 1-4095). The rest (lowest
 * 16 bits) is left for user data objects managed by a common mapping context.
 * PFNUM + VPORT comprise the SOURCE_PORT matching.
 */
#define ESW_VPORT_BITS 12
#define ESW_PFNUM_BITS 4
#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)

static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
{
	return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
}

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num);
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num);

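/* Illustrative only (hypothetical helper, not in the upstream driver):
 * composes a reg c0 source port metadata value from a PF number and a
 * vport id following the layout documented above. The authoritative
 * values come from mlx5_eswitch_get_vport_metadata_for_match/set(); this
 * sketch just makes the bit arithmetic concrete.
 */
static inline u32 esw_example_source_port_metadata(u32 pfnum, u32 vport)
{
	/* PFNUM occupies the top 4 bits, VPORT the next 12; both sit
	 * above the 16 user data bits.
	 */
	return ((pfnum << ESW_VPORT_BITS) | vport) <<
	       ESW_SOURCE_PORT_METADATA_OFFSET;
}
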
/* Reg C1 usage:
 * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
 *
 * The highest bit is reserved as a marker bit for other offloads, the next
 * 12 bits of reg c1 are the encapsulation tunnel id, the next 11 bits are
 * the encapsulation tunnel options, and the lowest 8 bits are used for the
 * zone id.
 *
 * Zone id is used to restore the CT flow when a packet misses on a chain.
 *
 * Tunnel id and options are used together to restore the tunnel info metadata
 * on miss and to support inner header rewrite by means of implicit chain 0
 * restore.
 */
#define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
/* 0x7FF is a reserved mapping */
#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
				       ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK

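/* Illustrative only (hypothetical helpers, not in the upstream driver):
 * unpack the reg c1 fields using the masks defined above, as would be
 * done when restoring tunnel and CT state after a table miss.
 */
static inline u8 esw_example_reg_c1_zone_id(u32 reg_c1)
{
	return reg_c1 & ESW_ZONE_ID_MASK;
}

static inline u32 esw_example_reg_c1_tun_opts(u32 reg_c1)
{
	return (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
}

static inline u32 esw_example_reg_c1_tun_id(u32 reg_c1)
{
	return (reg_c1 & ESW_TUN_MASK) >>
	       (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS);
}
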
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */

static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
	return MLX5_ESWITCH_NONE;
}

static inline enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static inline bool
mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return false;
}

static inline bool
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return false;
}

static inline u32
mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
{
	return 0;
}

static inline u32
mlx5_eswitch_get_vport_metadata_mask(void)
{
	return 0;
}

static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	return 0;
}

static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return NULL;
}
#endif /* CONFIG_MLX5_ESWITCH */

static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
{
	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
}

#endif