/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "esw/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
		} sample;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
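
/* For illustration: MLX5_RATE_TO_BW_SHARE(1200, 100, 10) evaluates to
 * min_t(u32, max_t(u32, 1200 / 100, MLX5_MIN_BW_SHARE), 10) =
 * min(max(12, 1), 10) = 10, i.e. the computed share is clamped to the
 * [MLX5_MIN_BW_SHARE, limit] range.
 */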

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
		u32             min_rate;
		u32             max_rate;
	} qos;

	u16 vport;
	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	struct devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr      mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport       *vports;
	u32                     flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		bool            enabled;
		u32             root_tsar_id;
	} qos;

	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	} params;
	struct blocking_notifier_head n_head;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps);

int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow-based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT    = BIT(2),
	MLX5_ESW_ATTR_FLAG_SRC_REWRITE   = BIT(3),
	MLX5_ESW_ATTR_FLAG_SAMPLE        = BIT(4),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
	struct mlx5_sample_attr *sample;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
					   struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is an eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
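
/* For example, on a device where the embedded CPU PF (ECPF) is the
 * eswitch manager, this returns MLX5_VPORT_ECPF (0xfffe); on a plain
 * PF eswitch manager it returns MLX5_VPORT_PF (0).
 */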

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
{
	/* PF and VF vport indices occupy 0..max_vfs, so SFs start right after */
	return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}

static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
{
	return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
}

static inline int
mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num - mlx5_sf_start_function_id(esw->dev) +
	       MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}

static inline u16
mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
{
	return mlx5_sf_start_function_id(esw->dev) + idx -
	       (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
}

static inline bool
mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_sf_supported(esw->dev) &&
	       vport_num >= mlx5_sf_start_function_id(esw->dev) &&
	       (vport_num < (mlx5_sf_start_function_id(esw->dev) +
			     mlx5_sf_max_functions(esw->dev)));
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* The uplink vport always occupies the last element of the array. */
	return esw->total_vports - 1;
}

static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
	return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
						  u16 vport_num)
{
	if (vport_num == MLX5_VPORT_ECPF) {
		if (!mlx5_ecpf_vport_exists(esw->dev))
			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
		return mlx5_eswitch_ecpf_idx(esw);
	}

	if (vport_num == MLX5_VPORT_UPLINK)
		return mlx5_eswitch_uplink_idx(esw);

	if (mlx5_esw_is_sf_vport(esw, vport_num))
		return mlx5_esw_sf_vport_num_to_index(esw, vport_num);

	/* PF and VF vport numbers map directly to indices 0..max_vfs */
	return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
						  int index)
{
	if (index == mlx5_eswitch_ecpf_idx(esw) &&
	    mlx5_ecpf_vport_exists(esw->dev))
		return MLX5_VPORT_ECPF;

	if (index == mlx5_eswitch_uplink_idx(esw))
		return MLX5_VPORT_UPLINK;

	/* SF vports indices are after VFs and before ECPF */
	if (mlx5_sf_supported(esw->dev) &&
	    index > mlx5_core_max_vfs(esw->dev))
		return mlx5_esw_sf_vport_index_to_num(esw, index);

	/* PF and VF vport indices 0..max_vfs map directly to vport numbers */
	return index;
}
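
/* Illustrative summary of the vport index layout implied by the
 * helpers above (assumes a device with SF support and an ECPF):
 *
 *	index 0				PF
 *	indices 1..max_vfs		VFs
 *	mlx5_esw_sf_start_idx() on	SFs
 *	total_vports - 2		ECPF
 *	total_vports - 1		uplink
 */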

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
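
/* Round-trip sketch: for vhca_id 0x25 and vport 3 the devlink port
 * index is (0x25 << 16) | 3 = 0x250003, and masking with 0xffff
 * recovers vport 3; the reverse mapping discards the vhca_id part.
 */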

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)		\
	for ((i) = MLX5_VPORT_PF;			\
	     (vport) = &(esw)->vports[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
	for ((i) = (esw)->total_vports - 1;		\
	     (vport) = &(esw)->vports[i],		\
	     (i) >= MLX5_VPORT_PF; (i)--)
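
/* Usage sketch (illustrative only; assumes esw was set up by
 * mlx5_eswitch_init so that total_vports and vport->vport are valid):
 *
 *	struct mlx5_vport *vport;
 *	int i;
 *
 *	mlx5_esw_for_all_vports(esw, i, vport)
 *		esw_debug(esw->dev, "vport %d enabled %d\n",
 *			  vport->vport, vport->enabled);
 */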

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
	for ((i) = MLX5_VPORT_FIRST_VF;			\
	     (vport) = &(esw)->vports[(i)],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs)	\
	for ((i) = (nvfs);					\
	     (vport) = &(esw)->vports[(i)],			\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)			\
	for ((i) = MLX5_VPORT_PF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
	for ((i) = MLX5_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);					\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)	\
	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not the esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
	for ((i) = (esw)->first_host_vport;			\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);						\
	     (rep) = &(esw)->offloads.vport_reps[i],			\
	     (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
	for ((vport) = (esw)->first_host_vport;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);						\
	     (vport) >= (esw)->first_host_vport; (vport)--)

#define mlx5_esw_for_each_sf_rep(esw, i, rep)		\
	for ((i) = mlx5_esw_sf_start_idx(esw);		\
	     (rep) = &(esw)->offloads.vport_reps[(i)],	\
	     (i) < mlx5_esw_sf_end_idx(esw); (i++))

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
	const struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
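
/* Illustrative listener (hypothetical callback, not part of this file):
 *
 *	static int my_esw_mode_cb(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		pr_debug("eswitch mode now %u\n", info->new_mode);
 *		return NOTIFY_OK;
 *	}
 */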

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
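
/* A plausible pairing of the helpers above, based on their naming and
 * the mode_lock/user_count fields in struct mlx5_eswitch:
 * mlx5_esw_hold()/mlx5_esw_release() bracket a short section that must
 * not race with a mode change, mlx5_esw_get()/mlx5_esw_put() adjust the
 * longer-lived user_count reference, and mlx5_esw_try_lock()/
 * mlx5_esw_unlock() take and drop mode_lock itself around exclusive
 * mode changes.
 */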

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */