1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
4 #include <linux/netdevice.h>
5 #include <linux/list.h>
6 #include <linux/rhashtable.h>
7 #include <linux/xarray.h>
8 #include <linux/if_bridge.h>
9 #include <net/switchdev.h>
/* Bridge ingress/egress FDB tables are each split into two flow groups:
 * a VLAN-tagged group (lower half of the table) that also matches
 * cvlan_tag/first_vid, and a MAC-only group (upper half).
 */
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)

#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
/* FDB namespace levels for the two bridge offload tables. */
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
};
33 struct mlx5_esw_bridge_fdb_key {
34 unsigned char addr[ETH_ALEN];
39 MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
42 struct mlx5_esw_bridge_fdb_entry {
43 struct mlx5_esw_bridge_fdb_key key;
44 struct rhash_head ht_node;
45 struct net_device *dev;
46 struct list_head list;
50 struct mlx5_flow_handle *ingress_handle;
51 struct mlx5_fc *ingress_counter;
52 unsigned long lastuse;
53 struct mlx5_flow_handle *egress_handle;
56 static const struct rhashtable_params fdb_ht_params = {
57 .key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
58 .key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
59 .head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
60 .automatic_shrinking = true,
63 struct mlx5_esw_bridge_vlan {
68 struct mlx5_esw_bridge_port {
74 MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
77 struct mlx5_esw_bridge {
80 struct list_head list;
81 struct mlx5_esw_bridge_offloads *br_offloads;
83 struct list_head fdb_list;
84 struct rhashtable fdb_ht;
87 struct mlx5_flow_table *egress_ft;
88 struct mlx5_flow_group *egress_vlan_fg;
89 struct mlx5_flow_group *egress_mac_fg;
90 unsigned long ageing_time;
95 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
98 struct switchdev_notifier_fdb_info send_info;
100 send_info.addr = addr;
102 send_info.offloaded = true;
103 call_switchdev_notifiers(val, dev, &send_info.info, NULL);
106 static struct mlx5_flow_table *
107 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
109 struct mlx5_flow_table_attr ft_attr = {};
110 struct mlx5_core_dev *dev = esw->dev;
111 struct mlx5_flow_namespace *ns;
112 struct mlx5_flow_table *fdb;
114 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
116 esw_warn(dev, "Failed to get FDB namespace\n");
117 return ERR_PTR(-ENOENT);
120 ft_attr.max_fte = max_fte;
121 ft_attr.level = level;
122 ft_attr.prio = FDB_BR_OFFLOAD;
123 fdb = mlx5_create_flow_table(ns, &ft_attr);
125 esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
130 static struct mlx5_flow_group *
131 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
133 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
134 struct mlx5_flow_group *fg;
137 in = kvzalloc(inlen, GFP_KERNEL);
139 return ERR_PTR(-ENOMEM);
141 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
142 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
143 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
145 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
146 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
147 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
148 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
150 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
151 mlx5_eswitch_get_vport_metadata_mask());
153 MLX5_SET(create_flow_group_in, in, start_flow_index,
154 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
155 MLX5_SET(create_flow_group_in, in, end_flow_index,
156 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
158 fg = mlx5_create_flow_group(ingress_ft, in);
162 "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
168 static struct mlx5_flow_group *
169 mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
171 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
172 struct mlx5_flow_group *fg;
175 in = kvzalloc(inlen, GFP_KERNEL);
177 return ERR_PTR(-ENOMEM);
179 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
180 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
181 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
183 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
184 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
186 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
187 mlx5_eswitch_get_vport_metadata_mask());
189 MLX5_SET(create_flow_group_in, in, start_flow_index,
190 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
191 MLX5_SET(create_flow_group_in, in, end_flow_index,
192 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
194 fg = mlx5_create_flow_group(ingress_ft, in);
197 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
204 static struct mlx5_flow_group *
205 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
207 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
208 struct mlx5_flow_group *fg;
211 in = kvzalloc(inlen, GFP_KERNEL);
213 return ERR_PTR(-ENOMEM);
215 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
216 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
218 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
219 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
220 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
221 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
223 MLX5_SET(create_flow_group_in, in, start_flow_index,
224 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
225 MLX5_SET(create_flow_group_in, in, end_flow_index,
226 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
228 fg = mlx5_create_flow_group(egress_ft, in);
231 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
237 static struct mlx5_flow_group *
238 mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
240 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
241 struct mlx5_flow_group *fg;
244 in = kvzalloc(inlen, GFP_KERNEL);
246 return ERR_PTR(-ENOMEM);
248 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
249 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
251 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
252 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
254 MLX5_SET(create_flow_group_in, in, start_flow_index,
255 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
256 MLX5_SET(create_flow_group_in, in, end_flow_index,
257 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);
259 fg = mlx5_create_flow_group(egress_ft, in);
262 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
269 mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
271 struct mlx5_flow_group *mac_fg, *vlan_fg;
272 struct mlx5_flow_table *ingress_ft;
275 if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
278 ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
279 MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
281 if (IS_ERR(ingress_ft))
282 return PTR_ERR(ingress_ft);
284 vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
285 if (IS_ERR(vlan_fg)) {
286 err = PTR_ERR(vlan_fg);
290 mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
291 if (IS_ERR(mac_fg)) {
292 err = PTR_ERR(mac_fg);
296 br_offloads->ingress_ft = ingress_ft;
297 br_offloads->ingress_vlan_fg = vlan_fg;
298 br_offloads->ingress_mac_fg = mac_fg;
302 mlx5_destroy_flow_group(vlan_fg);
304 mlx5_destroy_flow_table(ingress_ft);
309 mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
311 mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
312 br_offloads->ingress_mac_fg = NULL;
313 mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
314 br_offloads->ingress_vlan_fg = NULL;
315 mlx5_destroy_flow_table(br_offloads->ingress_ft);
316 br_offloads->ingress_ft = NULL;
320 mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
321 struct mlx5_esw_bridge *bridge)
323 struct mlx5_flow_group *mac_fg, *vlan_fg;
324 struct mlx5_flow_table *egress_ft;
327 egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
328 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
330 if (IS_ERR(egress_ft))
331 return PTR_ERR(egress_ft);
333 vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
334 if (IS_ERR(vlan_fg)) {
335 err = PTR_ERR(vlan_fg);
339 mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
340 if (IS_ERR(mac_fg)) {
341 err = PTR_ERR(mac_fg);
345 bridge->egress_ft = egress_ft;
346 bridge->egress_vlan_fg = vlan_fg;
347 bridge->egress_mac_fg = mac_fg;
351 mlx5_destroy_flow_group(vlan_fg);
353 mlx5_destroy_flow_table(egress_ft);
358 mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
360 mlx5_destroy_flow_group(bridge->egress_mac_fg);
361 mlx5_destroy_flow_group(bridge->egress_vlan_fg);
362 mlx5_destroy_flow_table(bridge->egress_ft);
365 static struct mlx5_flow_handle *
366 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
367 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
368 struct mlx5_esw_bridge *bridge)
370 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
371 struct mlx5_flow_act flow_act = {
372 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
373 .flags = FLOW_ACT_NO_APPEND,
375 struct mlx5_flow_destination dests[2] = {};
376 struct mlx5_flow_spec *rule_spec;
377 struct mlx5_flow_handle *handle;
380 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
382 return ERR_PTR(-ENOMEM);
384 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
386 smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
387 outer_headers.smac_47_16);
388 ether_addr_copy(smac_v, addr);
389 smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
390 outer_headers.smac_47_16);
391 eth_broadcast_addr(smac_c);
393 MLX5_SET(fte_match_param, rule_spec->match_criteria,
394 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
395 MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
396 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
399 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
400 outer_headers.cvlan_tag);
401 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
402 outer_headers.cvlan_tag);
403 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
404 outer_headers.first_vid);
405 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
409 dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
410 dests[0].ft = bridge->egress_ft;
411 dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
412 dests[1].counter_id = counter_id;
414 handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
421 static struct mlx5_flow_handle *
422 mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
423 struct mlx5_esw_bridge_vlan *vlan,
424 struct mlx5_esw_bridge *bridge)
426 struct mlx5_flow_destination dest = {
427 .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
428 .vport.num = vport_num,
430 struct mlx5_flow_act flow_act = {
431 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
432 .flags = FLOW_ACT_NO_APPEND,
434 struct mlx5_flow_spec *rule_spec;
435 struct mlx5_flow_handle *handle;
438 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
440 return ERR_PTR(-ENOMEM);
442 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
444 dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
445 outer_headers.dmac_47_16);
446 ether_addr_copy(dmac_v, addr);
447 dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
448 outer_headers.dmac_47_16);
449 eth_broadcast_addr(dmac_c);
452 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
453 outer_headers.cvlan_tag);
454 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
455 outer_headers.cvlan_tag);
456 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
457 outer_headers.first_vid);
458 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
462 handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);
468 static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
469 struct mlx5_esw_bridge_offloads *br_offloads)
471 struct mlx5_esw_bridge *bridge;
474 bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
476 return ERR_PTR(-ENOMEM);
478 bridge->br_offloads = br_offloads;
479 err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
483 err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
487 INIT_LIST_HEAD(&bridge->fdb_list);
488 xa_init(&bridge->vports);
489 bridge->ifindex = ifindex;
491 bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
492 list_add(&bridge->list, &br_offloads->bridges);
497 mlx5_esw_bridge_egress_table_cleanup(bridge);
503 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
508 static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
509 struct mlx5_esw_bridge *bridge)
511 if (--bridge->refcnt)
514 mlx5_esw_bridge_egress_table_cleanup(bridge);
515 WARN_ON(!xa_empty(&bridge->vports));
516 list_del(&bridge->list);
517 rhashtable_destroy(&bridge->fdb_ht);
520 if (list_empty(&br_offloads->bridges))
521 mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
524 static struct mlx5_esw_bridge *
525 mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
527 struct mlx5_esw_bridge *bridge;
531 list_for_each_entry(bridge, &br_offloads->bridges, list) {
532 if (bridge->ifindex == ifindex) {
533 mlx5_esw_bridge_get(bridge);
538 if (!br_offloads->ingress_ft) {
539 int err = mlx5_esw_bridge_ingress_table_init(br_offloads);
545 bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
546 if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
547 mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
551 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
552 struct mlx5_esw_bridge *bridge)
554 return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL);
557 static struct mlx5_esw_bridge_port *
558 mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge)
560 return xa_load(&bridge->vports, vport_num);
563 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
564 struct mlx5_esw_bridge *bridge)
566 xa_erase(&bridge->vports, port->vport_num);
570 mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
571 struct mlx5_esw_bridge *bridge)
573 rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
574 mlx5_del_flow_rules(entry->egress_handle);
575 mlx5_del_flow_rules(entry->ingress_handle);
576 mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
577 list_del(&entry->list);
581 static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
583 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
585 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
586 if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
587 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
589 SWITCHDEV_FDB_DEL_TO_BRIDGE);
590 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
594 static struct mlx5_esw_bridge_vlan *
595 mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
597 return xa_load(&port->vlans, vid);
600 static struct mlx5_esw_bridge_vlan *
601 mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port)
603 struct mlx5_esw_bridge_vlan *vlan;
606 vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
608 return ERR_PTR(-ENOMEM);
612 err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
621 static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
622 struct mlx5_esw_bridge_vlan *vlan)
624 xa_erase(&port->vlans, vlan->vid);
/* Detach a VLAN entry from its port and free it. */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
634 static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port)
636 struct mlx5_esw_bridge_vlan *vlan;
639 xa_for_each(&port->vlans, index, vlan)
640 mlx5_esw_bridge_vlan_cleanup(port, vlan);
643 static struct mlx5_esw_bridge_vlan *
644 mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge,
645 struct mlx5_eswitch *esw)
647 struct mlx5_esw_bridge_port *port;
648 struct mlx5_esw_bridge_vlan *vlan;
650 port = mlx5_esw_bridge_port_lookup(vport_num, bridge);
652 /* FDB is added asynchronously on wq while port might have been deleted
653 * concurrently. Report on 'info' logging level and skip the FDB offload.
655 esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
656 return ERR_PTR(-EINVAL);
659 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
661 /* FDB is added asynchronously on wq while vlan might have been deleted
662 * concurrently. Report on 'info' logging level and skip the FDB offload.
664 esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
666 return ERR_PTR(-EINVAL);
672 static struct mlx5_esw_bridge_fdb_entry *
673 mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr,
674 u16 vid, bool added_by_user, struct mlx5_eswitch *esw,
675 struct mlx5_esw_bridge *bridge)
677 struct mlx5_esw_bridge_vlan *vlan = NULL;
678 struct mlx5_esw_bridge_fdb_entry *entry;
679 struct mlx5_flow_handle *handle;
680 struct mlx5_fc *counter;
681 struct mlx5e_priv *priv;
684 if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
685 vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw);
687 return ERR_CAST(vlan);
688 if (vlan->flags & (BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED))
689 return ERR_PTR(-EOPNOTSUPP); /* can't offload vlan push/pop */
692 priv = netdev_priv(dev);
693 entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
695 return ERR_PTR(-ENOMEM);
697 ether_addr_copy(entry->key.addr, addr);
698 entry->key.vid = vid;
700 entry->vport_num = vport_num;
701 entry->lastuse = jiffies;
703 entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
705 counter = mlx5_fc_create(priv->mdev, true);
706 if (IS_ERR(counter)) {
707 err = PTR_ERR(counter);
708 goto err_ingress_fc_create;
710 entry->ingress_counter = counter;
712 handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter),
714 if (IS_ERR(handle)) {
715 err = PTR_ERR(handle);
716 esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
718 goto err_ingress_flow_create;
720 entry->ingress_handle = handle;
722 handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge);
723 if (IS_ERR(handle)) {
724 err = PTR_ERR(handle);
725 esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
727 goto err_egress_flow_create;
729 entry->egress_handle = handle;
731 err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
733 esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
737 list_add(&entry->list, &bridge->fdb_list);
741 mlx5_del_flow_rules(entry->egress_handle);
742 err_egress_flow_create:
743 mlx5_del_flow_rules(entry->ingress_handle);
744 err_ingress_flow_create:
745 mlx5_fc_destroy(priv->mdev, entry->ingress_counter);
746 err_ingress_fc_create:
751 int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
752 struct mlx5_vport *vport)
757 vport->bridge->ageing_time = ageing_time;
761 int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
762 struct mlx5_vport *vport)
764 struct mlx5_esw_bridge *bridge;
770 bridge = vport->bridge;
771 filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
772 if (filtering == enable)
775 mlx5_esw_bridge_fdb_flush(bridge);
777 bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
779 bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
784 static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads,
785 struct mlx5_esw_bridge *bridge,
786 struct mlx5_vport *vport)
788 struct mlx5_eswitch *esw = br_offloads->esw;
789 struct mlx5_esw_bridge_port *port;
792 port = kvzalloc(sizeof(*port), GFP_KERNEL);
798 port->vport_num = vport->vport;
799 xa_init(&port->vlans);
800 err = mlx5_esw_bridge_port_insert(port, bridge);
802 esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n",
804 goto err_port_insert;
807 vport->bridge = bridge;
813 mlx5_esw_bridge_put(br_offloads, bridge);
817 static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
818 struct mlx5_vport *vport)
820 struct mlx5_esw_bridge *bridge = vport->bridge;
821 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
822 struct mlx5_esw_bridge_port *port;
824 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
825 if (entry->vport_num == vport->vport)
826 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
828 port = mlx5_esw_bridge_port_lookup(vport->vport, bridge);
830 WARN(1, "Vport %u metadata not found on bridge", vport->vport);
834 mlx5_esw_bridge_port_vlans_flush(port);
835 mlx5_esw_bridge_port_erase(port, bridge);
837 mlx5_esw_bridge_put(br_offloads, bridge);
838 vport->bridge = NULL;
842 int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
843 struct mlx5_vport *vport, struct netlink_ext_ack *extack)
845 struct mlx5_esw_bridge *bridge;
848 WARN_ON(vport->bridge);
850 bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
851 if (IS_ERR(bridge)) {
852 NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
853 return PTR_ERR(bridge);
856 err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport);
858 NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
862 int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
863 struct mlx5_vport *vport, struct netlink_ext_ack *extack)
865 struct mlx5_esw_bridge *bridge = vport->bridge;
869 NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
872 if (bridge->ifindex != ifindex) {
873 NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
877 err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
879 NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
883 int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
884 struct mlx5_vport *vport, struct netlink_ext_ack *extack)
886 struct mlx5_esw_bridge_port *port;
887 struct mlx5_esw_bridge_vlan *vlan;
889 port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
893 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
899 vlan = mlx5_esw_bridge_vlan_create(vid, flags, port);
901 NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
902 return PTR_ERR(vlan);
907 void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport)
909 struct mlx5_esw_bridge_port *port;
910 struct mlx5_esw_bridge_vlan *vlan;
912 port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
916 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
919 mlx5_esw_bridge_vlan_cleanup(port, vlan);
922 void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
923 struct mlx5_vport *vport,
924 struct switchdev_notifier_fdb_info *fdb_info)
926 struct mlx5_esw_bridge *bridge = vport->bridge;
927 struct mlx5_esw_bridge_fdb_entry *entry;
928 u16 vport_num = vport->vport;
931 esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
935 entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid,
936 fdb_info->added_by_user, esw, bridge);
940 if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
941 mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
942 SWITCHDEV_FDB_OFFLOADED);
944 /* Take over dynamic entries to prevent kernel bridge from aging them out. */
945 mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
946 SWITCHDEV_FDB_ADD_TO_BRIDGE);
949 void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
950 struct mlx5_vport *vport,
951 struct switchdev_notifier_fdb_info *fdb_info)
953 struct mlx5_esw_bridge *bridge = vport->bridge;
954 struct mlx5_esw_bridge_fdb_entry *entry;
955 struct mlx5_esw_bridge_fdb_key key;
956 u16 vport_num = vport->vport;
959 esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
963 ether_addr_copy(key.addr, fdb_info->addr);
964 key.vid = fdb_info->vid;
965 entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
968 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
969 key.addr, key.vid, vport_num);
973 if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
974 mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
975 SWITCHDEV_FDB_DEL_TO_BRIDGE);
976 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
979 void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
981 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
982 struct mlx5_esw_bridge *bridge;
984 list_for_each_entry(bridge, &br_offloads->bridges, list) {
985 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
986 unsigned long lastuse =
987 (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
989 if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
992 if (time_after(lastuse, entry->lastuse)) {
993 entry->lastuse = lastuse;
994 /* refresh existing bridge entry */
995 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
997 SWITCHDEV_FDB_ADD_TO_BRIDGE);
998 } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
999 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
1001 SWITCHDEV_FDB_DEL_TO_BRIDGE);
1002 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
1008 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1010 struct mlx5_eswitch *esw = br_offloads->esw;
1011 struct mlx5_vport *vport;
1014 mlx5_esw_for_each_vport(esw, i, vport)
1016 mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
1018 WARN_ONCE(!list_empty(&br_offloads->bridges),
1019 "Cleaning up bridge offloads while still having bridges attached\n");
1022 struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1024 struct mlx5_esw_bridge_offloads *br_offloads;
1026 br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1028 return ERR_PTR(-ENOMEM);
1030 INIT_LIST_HEAD(&br_offloads->bridges);
1031 br_offloads->esw = esw;
1032 esw->br_offloads = br_offloads;
1037 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1039 struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1044 mlx5_esw_bridge_flush(br_offloads);
1046 esw->br_offloads = NULL;
1047 kvfree(br_offloads);