1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
/* Forward declaration; the ops are selected per bridge type below. */
26 struct mlxsw_sp_bridge_ops;
/* Per-ASIC bridge state: the list of offloaded bridge devices, FDB
 * learning work, ageing limits and the multicast (MID) index bitmap.
 */
28 struct mlxsw_sp_bridge {
29 struct mlxsw_sp *mlxsw_sp;
/* Delayed work — presumably the periodic FDB notification poll; TODO confirm */
31 struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 unsigned int interval; /* ms */
/* Ageing time bounds enforced in mlxsw_sp_port_attr_br_ageing_set() */
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
/* Only one VLAN-aware bridge may be offloaded at a time */
39 bool vlan_enabled_exists;
40 struct list_head bridges_list;
41 DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
/* Type-specific ops: 802.1Q (VLAN-aware) vs. 802.1D (VLAN-unaware) */
42 const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
/* One offloaded Linux bridge device: its netdev, membership in
 * bridge->bridges_list, its enslaved ports and multicast groups.
 */
46 struct mlxsw_sp_bridge_device {
47 struct net_device *dev;
48 struct list_head list;
49 struct list_head ports_list;
50 struct list_head mids_list;
/* Dispatch table chosen by VLAN awareness at creation time */
54 const struct mlxsw_sp_bridge_ops *ops;
/* One bridge port (a switch port or LAG enslaved to a bridge device).
 * Refcounted: shared by all port-VLANs using this bridge port.
 */
57 struct mlxsw_sp_bridge_port {
58 struct net_device *dev;
59 struct mlxsw_sp_bridge_device *bridge_device;
60 struct list_head list;
61 struct list_head vlans_list;
62 unsigned int ref_count;
/* A VLAN configured on a bridge port; links the port-VLANs of all
 * member switch ports via port_vlan_list.
 */
73 struct mlxsw_sp_bridge_vlan {
74 struct list_head list;
75 struct list_head port_vlan_list;
/* Per-bridge-type (802.1Q / 802.1D) operations: port and VXLAN
 * membership, plus FID (filtering identifier) management.
 */
79 struct mlxsw_sp_bridge_ops {
80 int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 struct mlxsw_sp_bridge_port *bridge_port,
82 struct mlxsw_sp_port *mlxsw_sp_port,
83 struct netlink_ext_ack *extack);
84 void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 struct mlxsw_sp_bridge_port *bridge_port,
86 struct mlxsw_sp_port *mlxsw_sp_port);
87 int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
88 const struct net_device *vxlan_dev, u16 vid,
89 struct netlink_ext_ack *extack);
/* fid_get: get-or-create the FID backing @vid on this bridge */
91 (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
92 u16 vid, struct netlink_ext_ack *extack);
/* fid_lookup: find an existing FID without creating one — TODO confirm */
94 (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
/* fid_vid: map a FID back to its VLAN ID */
96 u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
97 const struct mlxsw_sp_fid *fid);
/* Forward declarations for helpers defined later in the file. */
101 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
102 struct mlxsw_sp_bridge_port *bridge_port,
106 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
107 struct mlxsw_sp_bridge_port *bridge_port);
110 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
111 struct mlxsw_sp_bridge_device
115 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
116 struct mlxsw_sp_bridge_port *bridge_port,
/* Linear search of bridge->bridges_list for the bridge_device tracking
 * @br_dev; NULL when the bridge is not offloaded.
 */
119 static struct mlxsw_sp_bridge_device *
120 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
121 const struct net_device *br_dev)
123 struct mlxsw_sp_bridge_device *bridge_device;
125 list_for_each_entry(bridge_device, &bridge->bridges_list, list)
126 if (bridge_device->dev == br_dev)
127 return bridge_device;
/* True iff @br_dev is currently offloaded by this ASIC instance. */
132 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
133 const struct net_device *br_dev)
135 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* netdev_walk_all_upper_dev_rcu() callback: destroy the router
 * interface (RIF) associated with each upper device of the bridge.
 */
138 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
139 struct netdev_nested_priv *priv)
141 struct mlxsw_sp *mlxsw_sp = priv->data;
143 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
/* Destroy the RIF of @dev itself and of every upper device above it
 * (e.g. VLAN uppers of the bridge), walking the upper hierarchy.
 */
147 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
148 struct net_device *dev)
150 struct netdev_nested_priv priv = {
151 .data = (void *)mlxsw_sp,
154 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
155 netdev_walk_all_upper_dev_rcu(dev,
156 mlxsw_sp_bridge_device_upper_rif_destroy,
/* Replay bridge membership for VXLAN devices already enslaved to
 * @br_dev: join every running VXLAN lower. On failure, unwind by
 * leaving the VXLAN devices joined so far (second loop is the error
 * path; presumably it stops at the failing device — lines elided here).
 */
160 static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
161 struct net_device *br_dev,
162 struct netlink_ext_ack *extack)
164 struct net_device *dev, *stop_dev;
165 struct list_head *iter;
168 netdev_for_each_lower_dev(br_dev, dev, iter) {
169 if (netif_is_vxlan(dev) && netif_running(dev)) {
170 err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
/* Error rollback: leave the VXLAN devices that were joined above */
183 netdev_for_each_lower_dev(br_dev, dev, iter) {
184 if (netif_is_vxlan(dev) && netif_running(dev)) {
187 mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
/* Inverse of mlxsw_sp_bridge_device_vxlan_init(): leave every running
 * VXLAN device enslaved to @br_dev.
 */
193 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
194 struct net_device *br_dev)
196 struct net_device *dev;
197 struct list_head *iter;
199 netdev_for_each_lower_dev(br_dev, dev, iter) {
200 if (netif_is_vxlan(dev) && netif_running(dev))
201 mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
/* Allocate and register a bridge_device for @br_dev.
 *
 * Enforces the single-VLAN-aware-bridge limitation, snapshots the
 * bridge's VLAN/multicast/mrouter state, picks the 802.1Q or 802.1D
 * ops accordingly and replays any pre-existing VXLAN lowers.
 * Returns the new bridge_device or ERR_PTR().
 */
205 static struct mlxsw_sp_bridge_device *
206 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
207 struct net_device *br_dev,
208 struct netlink_ext_ack *extack)
210 struct device *dev = bridge->mlxsw_sp->bus_info->dev;
211 struct mlxsw_sp_bridge_device *bridge_device;
212 bool vlan_enabled = br_vlan_enabled(br_dev);
/* Hardware limitation: only one VLAN-aware bridge can be offloaded */
215 if (vlan_enabled && bridge->vlan_enabled_exists) {
216 dev_err(dev, "Only one VLAN-aware bridge is supported\n");
217 NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
218 return ERR_PTR(-EINVAL);
221 bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
223 return ERR_PTR(-ENOMEM);
225 bridge_device->dev = br_dev;
226 bridge_device->vlan_enabled = vlan_enabled;
227 bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
228 bridge_device->mrouter = br_multicast_router(br_dev);
229 INIT_LIST_HEAD(&bridge_device->ports_list);
/* VLAN-aware bridge uses the 802.1Q ops, otherwise 802.1D */
231 bridge->vlan_enabled_exists = true;
232 bridge_device->ops = bridge->bridge_8021q_ops;
234 bridge_device->ops = bridge->bridge_8021d_ops;
236 INIT_LIST_HEAD(&bridge_device->mids_list);
237 list_add(&bridge_device->list, &bridge->bridges_list);
239 /* It is possible we already have VXLAN devices enslaved to the bridge.
240 * In which case, we need to replay their configuration as if they were
241 * just now enslaved to the bridge.
243 err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
247 return bridge_device;
/* Error path: undo registration and the VLAN-aware accounting */
250 list_del(&bridge_device->list);
251 if (bridge_device->vlan_enabled)
252 bridge->vlan_enabled_exists = false;
253 kfree(bridge_device);
/* Tear down an offloaded bridge: leave VXLAN lowers, destroy related
 * RIFs, unregister and free. Port and MID lists must already be empty.
 */
258 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
259 struct mlxsw_sp_bridge_device *bridge_device)
261 mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
262 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
264 list_del(&bridge_device->list);
265 if (bridge_device->vlan_enabled)
266 bridge->vlan_enabled_exists = false;
267 WARN_ON(!list_empty(&bridge_device->ports_list));
268 WARN_ON(!list_empty(&bridge_device->mids_list));
269 kfree(bridge_device);
/* Get-or-create semantics: return the existing bridge_device for
 * @br_dev, or create one. Paired with mlxsw_sp_bridge_device_put().
 */
272 static struct mlxsw_sp_bridge_device *
273 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
274 struct net_device *br_dev,
275 struct netlink_ext_ack *extack)
277 struct mlxsw_sp_bridge_device *bridge_device;
279 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
281 return bridge_device;
283 return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
/* Destroy the bridge_device once its last port is gone; "refcount" is
 * implicit in the emptiness of ports_list.
 */
287 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
288 struct mlxsw_sp_bridge_device *bridge_device)
290 if (list_empty(&bridge_device->ports_list))
291 mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
/* Linear search of the bridge's ports_list for the bridge_port whose
 * netdev is @brport_dev.
 */
294 static struct mlxsw_sp_bridge_port *
295 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
296 const struct net_device *brport_dev)
298 struct mlxsw_sp_bridge_port *bridge_port;
300 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
301 if (bridge_port->dev == brport_dev)
/* Resolve @brport_dev's master bridge and look the port up in it.
 * Returns NULL when the bridge (or port) is not offloaded.
 */
308 struct mlxsw_sp_bridge_port *
309 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
310 struct net_device *brport_dev)
312 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
313 struct mlxsw_sp_bridge_device *bridge_device;
318 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
322 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
/* Allocate a bridge_port for @brport_dev under @bridge_device.
 * Records LAG vs. local-port addressing, seeds default STP state and
 * brport flags, and starts the reference count at 1.
 */
325 static struct mlxsw_sp_bridge_port *
326 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
327 struct net_device *brport_dev)
329 struct mlxsw_sp_bridge_port *bridge_port;
330 struct mlxsw_sp_port *mlxsw_sp_port;
332 bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
336 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
/* A LAG port is addressed by lag_id, a plain port by local_port */
337 bridge_port->lagged = mlxsw_sp_port->lagged;
338 if (bridge_port->lagged)
339 bridge_port->lag_id = mlxsw_sp_port->lag_id;
341 bridge_port->system_port = mlxsw_sp_port->local_port;
342 bridge_port->dev = brport_dev;
343 bridge_port->bridge_device = bridge_device;
/* Defaults mirror the bridge's initial per-port configuration */
344 bridge_port->stp_state = BR_STATE_DISABLED;
345 bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
347 INIT_LIST_HEAD(&bridge_port->vlans_list);
348 list_add(&bridge_port->list, &bridge_device->ports_list);
349 bridge_port->ref_count = 1;
/* Unlink and free a bridge_port; its VLAN list must be empty. */
355 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
357 list_del(&bridge_port->list);
358 WARN_ON(!list_empty(&bridge_port->vlans_list));
/* Get-or-create a bridge_port for @brport_dev. Takes a reference on an
 * existing port; otherwise acquires the bridge_device and creates a new
 * port, releasing the bridge_device again on failure.
 */
362 static struct mlxsw_sp_bridge_port *
363 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
364 struct net_device *brport_dev,
365 struct netlink_ext_ack *extack)
367 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
368 struct mlxsw_sp_bridge_device *bridge_device;
369 struct mlxsw_sp_bridge_port *bridge_port;
372 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
374 bridge_port->ref_count++;
378 bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
379 if (IS_ERR(bridge_device))
380 return ERR_CAST(bridge_device);
382 bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
385 goto err_bridge_port_create;
390 err_bridge_port_create:
391 mlxsw_sp_bridge_device_put(bridge, bridge_device);
/* Drop a reference; on the last put destroy the port and release its
 * bridge_device reference.
 */
395 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
396 struct mlxsw_sp_bridge_port *bridge_port)
398 struct mlxsw_sp_bridge_device *bridge_device;
400 if (--bridge_port->ref_count != 0)
402 bridge_device = bridge_port->bridge_device;
403 mlxsw_sp_bridge_port_destroy(bridge_port);
404 mlxsw_sp_bridge_device_put(bridge, bridge_device);
/* Find the port-VLAN of @mlxsw_sp_port that is bridged to
 * @bridge_device; on a VLAN-aware bridge also match the VLAN ID.
 */
407 static struct mlxsw_sp_port_vlan *
408 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
409 const struct mlxsw_sp_bridge_device *
413 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
415 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
/* Skip port-VLANs not attached to any bridge port */
417 if (!mlxsw_sp_port_vlan->bridge_port)
419 if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
/* VLAN-aware: a specific VID must match too */
422 if (bridge_device->vlan_enabled &&
423 mlxsw_sp_port_vlan->vid != vid)
425 return mlxsw_sp_port_vlan;
/* Find the port-VLAN whose FID has hardware index @fid_index. */
431 static struct mlxsw_sp_port_vlan*
432 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
435 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
437 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
439 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
441 if (fid && mlxsw_sp_fid_index(fid) == fid_index)
442 return mlxsw_sp_port_vlan;
/* Find the bridge_vlan for @vid on a bridge port. */
448 static struct mlxsw_sp_bridge_vlan *
449 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
452 struct mlxsw_sp_bridge_vlan *bridge_vlan;
454 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
455 if (bridge_vlan->vid == vid)
/* Allocate a bridge_vlan for @vid and link it to the bridge port. */
462 static struct mlxsw_sp_bridge_vlan *
463 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
465 struct mlxsw_sp_bridge_vlan *bridge_vlan;
467 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
471 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
472 bridge_vlan->vid = vid;
473 list_add(&bridge_vlan->list, &bridge_port->vlans_list);
/* Unlink and free a bridge_vlan; its port-VLAN list must be empty. */
479 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
481 list_del(&bridge_vlan->list);
482 WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
/* Get-or-create the bridge_vlan for @vid on @bridge_port. */
486 static struct mlxsw_sp_bridge_vlan *
487 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
489 struct mlxsw_sp_bridge_vlan *bridge_vlan;
491 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
495 return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
/* Destroy the bridge_vlan once no port-VLANs reference it. */
498 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
500 if (list_empty(&bridge_vlan->port_vlan_list))
501 mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
/* Program STP @state for this port's membership in @bridge_vlan: find
 * the matching port-VLAN and set the per-VID STP state in hardware.
 */
505 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
506 struct mlxsw_sp_bridge_vlan *bridge_vlan,
509 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
511 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
513 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
515 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
516 bridge_vlan->vid, state);
/* switchdev SWITCHDEV_ATTR_ID_PORT_STP_STATE handler: apply @state to
 * every VLAN on the bridge port, rolling back to the previous state on
 * failure. Only acts in the commit phase of the transaction.
 */
522 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
523 struct switchdev_trans *trans,
524 struct net_device *orig_dev,
527 struct mlxsw_sp_bridge_port *bridge_port;
528 struct mlxsw_sp_bridge_vlan *bridge_vlan;
531 if (switchdev_trans_ph_prepare(trans))
534 /* It's possible we failed to enslave the port, yet this
535 * operation is executed due to it being deferred.
537 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
542 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
543 err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
546 goto err_port_bridge_vlan_stp_set;
549 bridge_port->stp_state = state;
/* Rollback: restore the old STP state on already-updated VLANs */
553 err_port_bridge_vlan_stp_set:
554 list_for_each_entry_continue_reverse(bridge_vlan,
555 &bridge_port->vlans_list, list)
556 mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
557 bridge_port->stp_state)
/* Set flood-table membership of this port for @packet_type in the FID
 * backing @bridge_vlan (find the port's port-VLAN, program its FID).
 */
562 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
563 struct mlxsw_sp_bridge_vlan *bridge_vlan,
564 enum mlxsw_sp_flood_type packet_type,
567 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
569 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
571 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
573 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
575 mlxsw_sp_port->local_port,
/* Apply flood membership (@member) for @packet_type across all VLANs
 * of the bridge port; on failure revert the VLANs updated so far.
 */
583 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
584 struct mlxsw_sp_bridge_port *bridge_port,
585 enum mlxsw_sp_flood_type packet_type,
588 struct mlxsw_sp_bridge_vlan *bridge_vlan;
591 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
592 err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
597 goto err_port_bridge_vlan_flood_set;
/* Rollback with the opposite membership value */
602 err_port_bridge_vlan_flood_set:
603 list_for_each_entry_continue_reverse(bridge_vlan,
604 &bridge_port->vlans_list, list)
605 mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
606 packet_type, !member);
/* Enable/disable FDB learning for this port on @bridge_vlan's VID. */
611 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
612 struct mlxsw_sp_bridge_vlan *bridge_vlan,
615 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
616 u16 vid = bridge_vlan->vid;
618 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
620 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
622 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
/* Apply the learning setting across all VLANs of the bridge port,
 * reverting already-updated VLANs on failure.
 */
629 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
630 struct mlxsw_sp_bridge_port *bridge_port,
633 struct mlxsw_sp_bridge_vlan *bridge_vlan;
636 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
637 err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
640 goto err_port_bridge_vlan_learning_set;
645 err_port_bridge_vlan_learning_set:
646 list_for_each_entry_continue_reverse(bridge_vlan,
647 &bridge_port->vlans_list, list)
648 mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
/* Pre-check for bridge port flags: reject any flag the hardware
 * cannot offload (only learning, unicast and multicast flood).
 */
653 static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
655 struct switchdev_trans *trans,
656 unsigned long brport_flags)
658 if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
/* Commit-phase handler for bridge port flags: program unicast flood,
 * learning and (when IGMP snooping is off) multicast flood, then cache
 * the new flags on the bridge port.
 */
664 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
665 struct switchdev_trans *trans,
666 struct net_device *orig_dev,
667 unsigned long brport_flags)
669 struct mlxsw_sp_bridge_port *bridge_port;
672 if (switchdev_trans_ph_prepare(trans))
675 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
680 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
681 MLXSW_SP_FLOOD_TYPE_UC,
682 brport_flags & BR_FLOOD);
686 err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
687 brport_flags & BR_LEARNING);
/* With multicast enabled, MC flooding is driven by mrouter state */
691 if (bridge_port->bridge_device->multicast_enabled)
694 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
695 MLXSW_SP_FLOOD_TYPE_MC,
702 memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
/* Write the FDB ageing time (seconds) to hardware via the SFDAT
 * register and cache it on success.
 */
706 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
708 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
711 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
712 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
715 mlxsw_sp->bridge->ageing_time = ageing_time;
/* switchdev ageing-time handler: convert clock_t to seconds, validate
 * the range in the prepare phase, program in the commit phase.
 */
719 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
720 struct switchdev_trans *trans,
721 unsigned long ageing_clock_t)
723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
725 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
727 if (switchdev_trans_ph_prepare(trans)) {
728 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
729 ageing_time > MLXSW_SP_MAX_AGEING_TIME)
735 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
/* VLAN-filtering attribute handler: toggling VLAN awareness of an
 * already-offloaded bridge is not supported — only accept a no-op.
 */
738 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
739 struct switchdev_trans *trans,
740 struct net_device *orig_dev,
743 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
744 struct mlxsw_sp_bridge_device *bridge_device;
/* Validation happens entirely in the prepare phase */
746 if (!switchdev_trans_ph_prepare(trans))
749 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
750 if (WARN_ON(!bridge_device))
753 if (bridge_device->vlan_enabled == vlan_enabled)
756 netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
/* Port mrouter attribute: when IGMP snooping is enabled, an mrouter
 * port must receive all multicast — update MC flood membership and the
 * MDB entries, then cache the new state.
 */
760 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
761 struct switchdev_trans *trans,
762 struct net_device *orig_dev,
763 bool is_port_mrouter)
765 struct mlxsw_sp_bridge_port *bridge_port;
768 if (switchdev_trans_ph_prepare(trans))
771 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
/* With snooping disabled the port floods all MC anyway */
776 if (!bridge_port->bridge_device->multicast_enabled)
779 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
780 MLXSW_SP_FLOOD_TYPE_MC,
785 mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
788 bridge_port->mrouter = is_port_mrouter;
/* Should this bridge port be in the MC flood table? With IGMP snooping
 * enabled only mrouter ports flood; otherwise BR_MCAST_FLOOD decides.
 */
792 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
794 const struct mlxsw_sp_bridge_device *bridge_device;
796 bridge_device = bridge_port->bridge_device;
797 return bridge_device->multicast_enabled ? bridge_port->mrouter :
798 bridge_port->flags & BR_MCAST_FLOOD;
/* Multicast-disabled attribute (IGMP snooping toggle): resync MDB
 * entries and recompute MC flood membership for every port of the
 * bridge, then record the new state.
 */
801 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
802 struct switchdev_trans *trans,
803 struct net_device *orig_dev,
806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
807 struct mlxsw_sp_bridge_device *bridge_device;
808 struct mlxsw_sp_bridge_port *bridge_port;
811 if (switchdev_trans_ph_prepare(trans))
814 /* It's possible we failed to enslave the port, yet this
815 * operation is executed due to it being deferred.
817 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
/* Flip state first so the MDB sync sees the new mode */
821 if (bridge_device->multicast_enabled != !mc_disabled) {
822 bridge_device->multicast_enabled = !mc_disabled;
823 mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
827 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
828 enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
829 bool member = mlxsw_sp_mc_flood(bridge_port);
831 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
833 packet_type, member);
838 bridge_device->multicast_enabled = !mc_disabled;
/* Add/remove the router port to/from the MID (multicast group) entry
 * @mid_idx via the SMID register.
 */
843 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
844 u16 mid_idx, bool add)
/* SMID payload is large; allocate instead of using the stack */
849 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
853 mlxsw_reg_smid_pack(smid_pl, mid_idx,
854 mlxsw_sp_router_port(mlxsw_sp), add);
855 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Add/remove the router port to/from every MID of the bridge —
 * used when the bridge's mrouter state changes.
 */
861 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
862 struct mlxsw_sp_bridge_device *bridge_device,
865 struct mlxsw_sp_mid *mid;
867 list_for_each_entry(mid, &bridge_device->mids_list, list)
868 mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
/* Bridge mrouter attribute: on a state change, update the router
 * port's membership in all of the bridge's MDB entries.
 */
872 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
873 struct switchdev_trans *trans,
874 struct net_device *orig_dev,
877 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
878 struct mlxsw_sp_bridge_device *bridge_device;
880 if (switchdev_trans_ph_prepare(trans))
883 /* It's possible we failed to enslave the port, yet this
884 * operation is executed due to it being deferred.
886 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
890 if (bridge_device->mrouter != is_mrouter)
891 mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
893 bridge_device->mrouter = is_mrouter;
/* Top-level switchdev attribute dispatcher for a port netdev.
 * Routes each SWITCHDEV_ATTR_ID_* to its handler; after a successful
 * commit, respins SPAN (mirroring) sessions since bridge topology may
 * have changed.
 */
897 static int mlxsw_sp_port_attr_set(struct net_device *dev,
898 const struct switchdev_attr *attr,
899 struct switchdev_trans *trans)
901 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
905 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
906 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
910 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
911 err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
913 attr->u.brport_flags);
915 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
916 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
918 attr->u.brport_flags);
920 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
921 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
922 attr->u.ageing_time);
924 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
925 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
927 attr->u.vlan_filtering);
929 case SWITCHDEV_ATTR_ID_PORT_MROUTER:
930 err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
934 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
935 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
937 attr->u.mc_disabled);
939 case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
940 err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
/* Mirroring sessions may need re-resolution after topology changes */
949 if (switchdev_trans_ph_commit(trans))
950 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
/* Bind a port-VLAN to the bridge's FID: get the FID from the bridge
 * ops, set UC/MC/BC flood membership per the port's flags, map
 * {port, vid} to the FID and store it. Unwinds in reverse on failure.
 */
956 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
957 struct mlxsw_sp_bridge_port *bridge_port,
958 struct netlink_ext_ack *extack)
960 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
961 struct mlxsw_sp_bridge_device *bridge_device;
962 u8 local_port = mlxsw_sp_port->local_port;
963 u16 vid = mlxsw_sp_port_vlan->vid;
964 struct mlxsw_sp_fid *fid;
967 bridge_device = bridge_port->bridge_device;
968 fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
972 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
973 bridge_port->flags & BR_FLOOD);
975 goto err_fid_uc_flood_set;
977 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
978 mlxsw_sp_mc_flood(bridge_port));
980 goto err_fid_mc_flood_set;
982 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
985 goto err_fid_bc_flood_set;
987 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
989 goto err_fid_port_vid_map;
991 mlxsw_sp_port_vlan->fid = fid;
/* Error unwind in strict reverse order of the setup above */
995 err_fid_port_vid_map:
996 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
997 err_fid_bc_flood_set:
998 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
999 err_fid_mc_flood_set:
1000 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1001 err_fid_uc_flood_set:
1002 mlxsw_sp_fid_put(fid);
/* Inverse of mlxsw_sp_port_vlan_fid_join(): unmap {port, vid}, clear
 * all flood membership and drop the FID reference.
 */
1007 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1009 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1010 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1011 u8 local_port = mlxsw_sp_port->local_port;
1012 u16 vid = mlxsw_sp_port_vlan->vid;
1014 mlxsw_sp_port_vlan->fid = NULL;
1015 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
1016 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1017 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1018 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1019 mlxsw_sp_fid_put(fid);
/* Compute the PVID the port should end up with after (de)configuring
 * @vid as PVID: removing PVID status maps to 0 (drop untagged),
 * otherwise the current PVID is kept.
 */
1023 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1024 u16 vid, bool is_pvid)
1028 else if (mlxsw_sp_port->pvid == vid)
1029 return 0; /* Dis-allow untagged packets */
1031 return mlxsw_sp_port->pvid;
/* Attach a port-VLAN to a bridge port: join the FID, program per-VID
 * learning and STP state, link into the bridge_vlan and take a
 * bridge_port reference. No-op when already attached (flags-only
 * change). Unwinds in reverse on failure.
 */
1035 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1036 struct mlxsw_sp_bridge_port *bridge_port,
1037 struct netlink_ext_ack *extack)
1039 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1040 struct mlxsw_sp_bridge_vlan *bridge_vlan;
1041 u16 vid = mlxsw_sp_port_vlan->vid;
1044 /* No need to continue if only VLAN flags were changed */
1045 if (mlxsw_sp_port_vlan->bridge_port)
1048 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
1053 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
1054 bridge_port->flags & BR_LEARNING);
1056 goto err_port_vid_learning_set;
1058 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1059 bridge_port->stp_state);
1061 goto err_port_vid_stp_set;
1063 bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1066 goto err_bridge_vlan_get;
1069 list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1070 &bridge_vlan->port_vlan_list);
/* Hold the bridge port for the lifetime of this attachment */
1072 mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1073 bridge_port->dev, extack);
1074 mlxsw_sp_port_vlan->bridge_port = bridge_port;
/* Error unwind in reverse order of the setup above */
1078 err_bridge_vlan_get:
1079 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1080 err_port_vid_stp_set:
1081 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1082 err_port_vid_learning_set:
1083 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
/* Detach a port-VLAN from its bridge port: unlink from the
 * bridge_vlan, disable per-VID STP/learning, flush FDB/MDB entries
 * (see last_port/last_vlan — the flush conditions are elided in this
 * view), leave the FID and drop the bridge_port reference.
 */
1088 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1090 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1091 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1092 struct mlxsw_sp_bridge_vlan *bridge_vlan;
1093 struct mlxsw_sp_bridge_port *bridge_port;
1094 u16 vid = mlxsw_sp_port_vlan->vid;
1095 bool last_port, last_vlan;
/* Only bridge FID types are expected here */
1097 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1098 mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1101 bridge_port = mlxsw_sp_port_vlan->bridge_port;
1102 last_vlan = list_is_singular(&bridge_port->vlans_list);
1103 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1104 last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1106 list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1107 mlxsw_sp_bridge_vlan_put(bridge_vlan);
1108 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1109 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1111 mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1113 mlxsw_sp_fid_index(fid));
1115 mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1117 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1119 mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1120 mlxsw_sp_port_vlan->bridge_port = NULL;
/* Add VLAN @vid to a bridge port on behalf of a switch port:
 * create the port-VLAN if needed, program VLAN membership
 * (tagged/untagged), set the PVID and join the bridge. Unwinds to the
 * previous PVID/membership on failure.
 */
1124 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1125 struct mlxsw_sp_bridge_port *bridge_port,
1126 u16 vid, bool is_untagged, bool is_pvid,
1127 struct netlink_ext_ack *extack)
1129 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1130 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1131 u16 old_pvid = mlxsw_sp_port->pvid;
1134 /* The only valid scenario in which a port-vlan already exists, is if
1135 * the VLAN flags were changed and the port-vlan is associated with the
1136 * correct bridge port
1138 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1139 if (mlxsw_sp_port_vlan &&
1140 mlxsw_sp_port_vlan->bridge_port != bridge_port)
1143 if (!mlxsw_sp_port_vlan) {
1144 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1146 if (IS_ERR(mlxsw_sp_port_vlan))
1147 return PTR_ERR(mlxsw_sp_port_vlan);
1150 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1153 goto err_port_vlan_set;
1155 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1157 goto err_port_pvid_set;
1159 err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
1162 goto err_port_vlan_bridge_join;
/* Error unwind: restore PVID, remove membership, destroy port-VLAN */
1166 err_port_vlan_bridge_join:
1167 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1169 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1171 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* Refuse VLAN operations that would change or remove the PVID of a
 * bridge that backs a router interface (the RIF is keyed on that VID).
 */
1176 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1177 const struct net_device *br_dev,
1178 const struct switchdev_obj_port_vlan *vlan)
1183 pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1187 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1188 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1190 netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1195 netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
/* switchdev port-VLAN object add handler. For VLANs added to the
 * bridge device itself, only validate against RIF PVID constraints;
 * otherwise add each VID in the range to the bridge port in the commit
 * phase.
 */
1204 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1205 const struct switchdev_obj_port_vlan *vlan,
1206 struct switchdev_trans *trans,
1207 struct netlink_ext_ack *extack)
1209 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1210 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1211 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1212 struct net_device *orig_dev = vlan->obj.orig_dev;
1213 struct mlxsw_sp_bridge_port *bridge_port;
/* VLAN on the bridge master itself: only the PVID sanity check */
1216 if (netif_is_bridge_master(orig_dev)) {
1219 if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1220 br_vlan_enabled(orig_dev) &&
1221 switchdev_trans_ph_prepare(trans))
1222 err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1229 if (switchdev_trans_ph_commit(trans))
1232 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1233 if (WARN_ON(!bridge_port))
/* VLAN-unaware bridge ports ignore VLAN objects */
1236 if (!bridge_port->bridge_device->vlan_enabled)
1239 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1242 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
/* Map LAG vs. plain port to the matching SFDF flush scope. */
1252 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1254 return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1255 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
/* Flush FDB entries for this bridge port in FID @fid_index via the
 * SFDF register, addressing by LAG ID or system port as appropriate.
 */
1259 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1260 struct mlxsw_sp_bridge_port *bridge_port,
1263 bool lagged = bridge_port->lagged;
1264 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1267 system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1268 mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1269 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1270 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1272 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
/* SFD record policy: dynamic (aged/roamed) vs. static-like entries. */
1275 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1277 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1278 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
/* SFD operation: add/edit vs. remove an FDB record. */
1281 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1283 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1284 MLXSW_REG_SFD_OP_WRITE_REMOVE;
/* Add/remove a unicast FDB record that points at a tunnel (VXLAN)
 * underlay IP via the SFD register. Verifies the device consumed the
 * record by comparing num_rec before and after the write.
 */
1287 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1288 const char *mac, u16 fid,
1289 enum mlxsw_sp_l3proto proto,
1290 const union mlxsw_sp_l3addr *addr,
1291 bool adding, bool dynamic)
1293 enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
1300 case MLXSW_SP_L3_PROTO_IPV4:
1301 uip = be32_to_cpu(addr->addr4);
1302 sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
/* IPv6 handling elided in this view — presumably unsupported or
 * routed through a different path; TODO confirm
 */
1304 case MLXSW_SP_L3_PROTO_IPV6:
1310 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1314 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1315 mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
1316 mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
1317 MLXSW_REG_SFD_REC_ACTION_NOP, uip,
1319 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1320 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
/* A mismatch means the device did not process the record */
1324 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Write or remove a single unicast FDB record for a local port with the
 * given forwarding action and record policy.
 */
1332 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1333 const char *mac, u16 fid, bool adding,
1334 enum mlxsw_reg_sfd_rec_action action,
1335 enum mlxsw_reg_sfd_rec_policy policy)
1341 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1345 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1346 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
/* num_rec is compared after the write to verify the device consumed
 * the record.
 */
1347 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1348 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1352 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Convenience wrapper: unicast FDB record with a NOP action and a policy
 * derived from whether the entry is dynamic.
 */
1360 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1361 const char *mac, u16 fid, bool adding,
1364 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1365 MLXSW_REG_SFD_REC_ACTION_NOP,
1366 mlxsw_sp_sfd_rec_policy(dynamic));
/* Program a router-interface FDB entry: static policy, traffic forwarded
 * to the IP router (local_port is don't-care, passed as 0).
 */
1369 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1372 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1373 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1374 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
/* Write or remove a unicast FDB record that points at a LAG instead of a
 * single local port. lag_vid disambiguates the VLAN on the LAG.
 */
1377 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1378 const char *mac, u16 fid, u16 lag_vid,
1379 bool adding, bool dynamic)
1385 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1389 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1390 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1391 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
/* Post-write record-count check, same pattern as the other SFD writes. */
1393 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1394 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1398 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Handle an FDB add/del request from the bridge: resolve the bridge port
 * and the {port, VLAN} to a FID, then program the entry either per-port
 * or per-LAG. The final 'false' argument makes the entry non-dynamic
 * (bridge-installed entries are not aged by the device).
 */
1407 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1408 struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1410 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1411 struct net_device *orig_dev = fdb_info->info.dev;
1412 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1413 struct mlxsw_sp_bridge_device *bridge_device;
1414 struct mlxsw_sp_bridge_port *bridge_port;
1417 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1421 bridge_device = bridge_port->bridge_device;
1422 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1425 if (!mlxsw_sp_port_vlan)
1428 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1429 vid = mlxsw_sp_port_vlan->vid;
1431 if (!bridge_port->lagged)
1432 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1433 bridge_port->system_port,
1434 fdb_info->addr, fid_index,
1437 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1438 bridge_port->lag_id,
1439 fdb_info->addr, fid_index,
1440 vid, adding, false);
/* Write or remove a multicast FDB record that maps {MAC, FID} to a MID
 * (multicast identifier) index programmed via SMID.
 */
1443 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1444 u16 fid, u16 mid_idx, bool adding)
1450 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1454 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1455 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1456 MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1457 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1458 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1462 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Write a complete SMID entry for mid_idx: the write mask covers every
 * existing port (plus the router port), member bits are taken from
 * ports_bitmap, and the router port is set per set_router_port.
 */
1470 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1472 bool set_router_port)
1477 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1481 mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
/* Port 0 is the CPU port, hence the iteration starts at 1. */
1482 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1483 if (mlxsw_sp->ports[i])
1484 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1487 mlxsw_reg_smid_port_mask_set(smid_pl,
1488 mlxsw_sp_router_port(mlxsw_sp), 1);
1490 for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1491 mlxsw_reg_smid_port_set(smid_pl, i, 1);
1493 mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1496 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Add or remove a single local port in the SMID entry of mid_idx. */
1501 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1502 u16 mid_idx, bool add)
1504 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1508 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1512 mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1513 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Look up a MID entry by {MAC, FID} in the bridge device's list. */
1519 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1520 const unsigned char *addr,
1523 struct mlxsw_sp_mid *mid;
1525 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1526 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
/* Fill ports_bitmap with the local ports behind a bridge port: a single
 * bit for a physical port, or one bit per present LAG member.
 */
1533 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1534 struct mlxsw_sp_bridge_port *bridge_port,
1535 unsigned long *ports_bitmap)
1537 struct mlxsw_sp_port *mlxsw_sp_port;
1538 u64 max_lag_members, i;
1541 if (!bridge_port->lagged) {
1542 set_bit(bridge_port->system_port, ports_bitmap);
1544 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1546 lag_id = bridge_port->lag_id;
1547 for (i = 0; i < max_lag_members; i++) {
1548 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1551 set_bit(mlxsw_sp_port->local_port,
/* OR into flood_bitmap the local ports of every bridge port that is a
 * multicast router port, so mrouter ports receive all MC traffic.
 */
1558 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1559 struct mlxsw_sp_bridge_device *bridge_device,
1560 struct mlxsw_sp *mlxsw_sp)
1562 struct mlxsw_sp_bridge_port *bridge_port;
1564 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1565 if (bridge_port->mrouter) {
1566 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
/* Program a MID entry into the device: allocate a free MID index, build
 * the flood bitmap (group members plus mrouter ports), write the SMID
 * entry and the SFD MC record, and reserve the index on success.
 * Returns an errno-style code (0 on success) — return statements are not
 * visible in this listing; confirm against the full source.
 */
1574 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1575 struct mlxsw_sp_mid *mid,
1576 struct mlxsw_sp_bridge_device *bridge_device)
1584 mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1586 if (mid_idx == MLXSW_SP_MID_MAX)
1589 num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1590 alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1591 flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1595 bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1596 mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1599 err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1600 bridge_device->mrouter);
1601 kfree(flood_bitmap);
1605 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
/* Only reserve the index once the device accepted the entry. */
1610 set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
/* Remove a MID entry from the device: release its index in the MID
 * bitmap and delete the SFD MC record.
 */
1615 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1616 struct mlxsw_sp_mid *mid)
1621 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1623 return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1628 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1629 struct mlxsw_sp_bridge_device *bridge_device,
1630 const unsigned char *addr,
1633 struct mlxsw_sp_mid *mid;
1636 mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1640 alloc_size = sizeof(unsigned long) *
1641 BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1643 mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1644 if (!mid->ports_in_mid)
1645 goto err_ports_in_mid_alloc;
1647 ether_addr_copy(mid->addr, addr);
1651 if (!bridge_device->multicast_enabled)
1654 if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1655 goto err_write_mdb_entry;
1658 list_add_tail(&mid->list, &bridge_device->mids_list);
1661 err_write_mdb_entry:
1662 kfree(mid->ports_in_mid);
1663 err_ports_in_mid_alloc:
/* Drop a port from a MID. If that leaves the group empty, remove the
 * MDB entry from the device and free the MID.
 */
1668 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1669 struct mlxsw_sp_mid *mid)
1671 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1674 clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1675 if (bitmap_empty(mid->ports_in_mid,
1676 mlxsw_core_max_ports(mlxsw_sp->core))) {
1677 err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1678 list_del(&mid->list);
1679 kfree(mid->ports_in_mid);
/* switchdev PORT_MDB add: resolve the bridge port and FID, get (or
 * allocate) the MID for the group address and add this port to it.
 * The hardware is only touched when multicast snooping is enabled and
 * the port is not an mrouter port (mrouter ports already receive all
 * multicast traffic).
 */
1685 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1686 const struct switchdev_obj_port_mdb *mdb,
1687 struct switchdev_trans *trans)
1689 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1690 struct net_device *orig_dev = mdb->obj.orig_dev;
1691 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1692 struct net_device *dev = mlxsw_sp_port->dev;
1693 struct mlxsw_sp_bridge_device *bridge_device;
1694 struct mlxsw_sp_bridge_port *bridge_port;
1695 struct mlxsw_sp_mid *mid;
/* Work is done in the prepare phase; nothing to do on commit. */
1699 if (switchdev_trans_ph_commit(trans))
1702 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1706 bridge_device = bridge_port->bridge_device;
1707 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1710 if (!mlxsw_sp_port_vlan)
1713 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1715 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1717 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1720 netdev_err(dev, "Unable to allocate MC group\n");
1724 set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1726 if (!bridge_device->multicast_enabled)
1729 if (bridge_port->mrouter)
1732 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1734 netdev_err(dev, "Unable to set SMID\n");
/* Error path: roll back the membership taken above. */
1741 mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
/* Sync all MDB entries of a bridge with its multicast-enabled state:
 * write every MID to the device when snooping is turned on, remove them
 * when it is turned off.
 */
1746 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1747 struct mlxsw_sp_bridge_device
1750 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1751 struct mlxsw_sp_mid *mid;
1754 mc_enabled = bridge_device->multicast_enabled;
1756 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1758 mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1761 mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
/* When a port's mrouter state changes, add it to (or remove it from)
 * every MID it is not already a member of, so an mrouter port floods all
 * registered groups without being an explicit member.
 */
1766 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1767 struct mlxsw_sp_bridge_port *bridge_port,
1770 struct mlxsw_sp_bridge_device *bridge_device;
1771 struct mlxsw_sp_mid *mid;
1773 bridge_device = bridge_port->bridge_device;
1775 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1776 if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1777 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
/* switchdev object-add entry point: dispatch PORT_VLAN and PORT_MDB
 * additions for a port netdev.
 */
1781 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1782 const struct switchdev_obj *obj,
1783 struct switchdev_trans *trans,
1784 struct netlink_ext_ack *extack)
1786 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1787 const struct switchdev_obj_port_vlan *vlan;
1791 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1792 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1793 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
1796 if (switchdev_trans_ph_prepare(trans)) {
1797 /* The event is emitted before the changes are actually
1798 * applied to the bridge. Therefore schedule the respin
1799 * call for later, so that the respin logic sees the
1800 * updated bridge state.
1802 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1805 case SWITCHDEV_OBJ_ID_PORT_MDB:
1806 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1807 SWITCHDEV_OBJ_PORT_MDB(obj),
/* Remove one VLAN from a bridge port: leave the bridge, clear the PVID
 * if this VLAN was it, drop the VLAN from the port and destroy the
 * port-VLAN object.
 */
1819 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1820 struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
/* If the deleted VLAN is the current PVID, reset the PVID to 0. */
1822 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1823 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1825 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1826 if (WARN_ON(!mlxsw_sp_port_vlan))
1829 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1830 mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1831 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1832 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* switchdev PORT_VLAN delete: remove each VLAN in the requested range
 * from the bridge port. Only meaningful on a VLAN-aware bridge.
 */
1835 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1836 const struct switchdev_obj_port_vlan *vlan)
1838 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1839 struct net_device *orig_dev = vlan->obj.orig_dev;
1840 struct mlxsw_sp_bridge_port *bridge_port;
1843 if (netif_is_bridge_master(orig_dev))
1846 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1847 if (WARN_ON(!bridge_port))
1850 if (!bridge_port->bridge_device->vlan_enabled)
1853 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1854 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
/* Remove a port from a MID: clear its SMID bit first (skipped when
 * snooping is off or the port is an mrouter, since it was never set),
 * then drop the membership and possibly the whole MDB entry.
 */
1860 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1861 struct mlxsw_sp_bridge_port *bridge_port,
1862 struct mlxsw_sp_mid *mid)
1864 struct net_device *dev = mlxsw_sp_port->dev;
1867 if (bridge_port->bridge_device->multicast_enabled &&
1868 !bridge_port->mrouter) {
1869 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1871 netdev_err(dev, "Unable to remove port from SMID\n");
1874 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1876 netdev_err(dev, "Unable to remove MC SFD\n");
/* switchdev PORT_MDB delete: resolve the bridge port, FID and MID for
 * the group address, then remove this port from the group.
 */
1881 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1882 const struct switchdev_obj_port_mdb *mdb)
1884 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1885 struct net_device *orig_dev = mdb->obj.orig_dev;
1886 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1887 struct mlxsw_sp_bridge_device *bridge_device;
1888 struct net_device *dev = mlxsw_sp_port->dev;
1889 struct mlxsw_sp_bridge_port *bridge_port;
1890 struct mlxsw_sp_mid *mid;
1893 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1897 bridge_device = bridge_port->bridge_device;
1898 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1901 if (!mlxsw_sp_port_vlan)
1904 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1906 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1908 netdev_err(dev, "Unable to remove port from MC DB\n");
1912 return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
/* Flush a port out of every MDB entry of its bridge when it leaves:
 * drop explicit memberships, and clear the SMID bit it got implicitly
 * as an mrouter port.
 */
1916 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1917 struct mlxsw_sp_bridge_port *bridge_port)
1919 struct mlxsw_sp_bridge_device *bridge_device;
1920 struct mlxsw_sp_mid *mid, *tmp;
1922 bridge_device = bridge_port->bridge_device;
/* _safe iteration: __mlxsw_sp_port_mdb_del() may unlink and free mid. */
1924 list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1925 if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1926 __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1928 } else if (bridge_device->multicast_enabled &&
1929 bridge_port->mrouter) {
1930 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
/* switchdev object-delete entry point: dispatch PORT_VLAN and PORT_MDB
 * removals, then respin SPAN to account for topology changes.
 */
1935 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1936 const struct switchdev_obj *obj)
1938 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1942 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1943 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1944 SWITCHDEV_OBJ_PORT_VLAN(obj));
1946 case SWITCHDEV_OBJ_ID_PORT_MDB:
1947 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1948 SWITCHDEV_OBJ_PORT_MDB(obj));
1955 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
/* Return the first present member port of a LAG, used as the LAG's
 * representative for FDB notifications.
 */
1960 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1963 struct mlxsw_sp_port *mlxsw_sp_port;
1964 u64 max_lag_members;
1967 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1969 for (i = 0; i < max_lag_members; i++) {
1970 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1972 return mlxsw_sp_port;
/* Join a port to a VLAN-aware (802.1Q) bridge. VLAN upper devices are
 * rejected, and the port's default VLAN stops acting as a router
 * interface.
 */
1978 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1979 struct mlxsw_sp_bridge_port *bridge_port,
1980 struct mlxsw_sp_port *mlxsw_sp_port,
1981 struct netlink_ext_ack *extack)
1983 if (is_vlan_dev(bridge_port->dev)) {
1984 NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1988 /* Port is no longer usable as a router interface */
1989 if (mlxsw_sp_port->default_vlan->fid)
1990 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
/* Leave a VLAN-aware bridge: restore the default PVID so untagged
 * traffic can ingress again.
 */
1996 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1997 struct mlxsw_sp_bridge_port *bridge_port,
1998 struct mlxsw_sp_port *mlxsw_sp_port)
2000 /* Make sure untagged frames are allowed to ingress */
2001 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
2005 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2006 const struct net_device *vxlan_dev, u16 vid,
2007 struct netlink_ext_ack *extack)
2009 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2010 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2011 struct mlxsw_sp_nve_params params = {
2012 .type = MLXSW_SP_NVE_TYPE_VXLAN,
2013 .vni = vxlan->cfg.vni,
2016 struct mlxsw_sp_fid *fid;
2019 /* If the VLAN is 0, we need to find the VLAN that is configured as
2020 * PVID and egress untagged on the bridge port of the VxLAN device.
2021 * It is possible no such VLAN exists
2024 err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
2029 fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2031 NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
2032 return PTR_ERR(fid);
2035 if (mlxsw_sp_fid_vni_is_set(fid)) {
2036 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2038 goto err_vni_exists;
2041 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack);
2043 goto err_nve_fid_enable;
2049 mlxsw_sp_fid_put(fid);
/* Find the VxLAN lower device of the bridge whose mapped (PVID /
 * egress-untagged) VLAN equals vid, or NULL if none matches.
 */
2053 static struct net_device *
2054 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2056 struct net_device *dev;
2057 struct list_head *iter;
2059 netdev_for_each_lower_dev(br_dev, dev, iter) {
2063 if (!netif_is_vxlan(dev))
2066 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2067 if (err || pvid != vid)
/* 802.1Q bridge op: each VLAN maps to its own 802.1Q FID. */
2076 static struct mlxsw_sp_fid *
2077 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2078 u16 vid, struct netlink_ext_ack *extack)
2080 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2082 return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
/* 802.1Q bridge op: look up the FID of a VLAN without creating it. */
2085 static struct mlxsw_sp_fid *
2086 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2089 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2091 return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
/* 802.1Q bridge op: the VID of an 802.1Q FID is the VLAN it encodes. */
2095 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2096 const struct mlxsw_sp_fid *fid)
2098 return mlxsw_sp_fid_8021q_vid(fid);
/* Operations for VLAN-aware (802.1Q) bridges. */
2101 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2102 .port_join = mlxsw_sp_bridge_8021q_port_join,
2103 .port_leave = mlxsw_sp_bridge_8021q_port_leave,
2104 .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
2105 .fid_get = mlxsw_sp_bridge_8021q_fid_get,
2106 .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
2107 .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
/* Return whether any port-VLAN of the port is already bridged to
 * br_dev. Used to reject bridging two VLAN uppers of the same port.
 */
2111 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2112 const struct net_device *br_dev)
2114 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2116 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2118 if (mlxsw_sp_port_vlan->bridge_port &&
2119 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
/* Join a port (or a VLAN upper of it) to a VLAN-unaware (802.1D)
 * bridge: the member VLAN is the upper's VLAN ID or the default VID,
 * and that port-VLAN is moved from router use to the bridge.
 */
2128 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2129 struct mlxsw_sp_bridge_port *bridge_port,
2130 struct mlxsw_sp_port *mlxsw_sp_port,
2131 struct netlink_ext_ack *extack)
2133 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2134 struct net_device *dev = bridge_port->dev;
2137 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2138 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2139 if (WARN_ON(!mlxsw_sp_port_vlan))
2142 if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2143 NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2147 /* Port is no longer usable as a router interface */
2148 if (mlxsw_sp_port_vlan->fid)
2149 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2151 return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
/* Leave a VLAN-unaware bridge: detach the member port-VLAN (upper's
 * VLAN ID or the default VID) from the bridge, if still attached.
 */
2156 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2157 struct mlxsw_sp_bridge_port *bridge_port,
2158 struct mlxsw_sp_port *mlxsw_sp_port)
2160 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2161 struct net_device *dev = bridge_port->dev;
2164 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2165 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2166 if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2169 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2173 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2174 const struct net_device *vxlan_dev, u16 vid,
2175 struct netlink_ext_ack *extack)
2177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2178 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2179 struct mlxsw_sp_nve_params params = {
2180 .type = MLXSW_SP_NVE_TYPE_VXLAN,
2181 .vni = vxlan->cfg.vni,
2184 struct mlxsw_sp_fid *fid;
2187 fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2189 NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2193 if (mlxsw_sp_fid_vni_is_set(fid)) {
2194 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2196 goto err_vni_exists;
2199 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack);
2201 goto err_nve_fid_enable;
2207 mlxsw_sp_fid_put(fid);
/* 802.1D bridge op: the whole bridge shares one FID, keyed by the
 * bridge netdev's ifindex; vid is ignored.
 */
2211 static struct mlxsw_sp_fid *
2212 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2213 u16 vid, struct netlink_ext_ack *extack)
2215 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2217 return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
/* 802.1D bridge op: look up the bridge's FID without creating it. */
2220 static struct mlxsw_sp_fid *
2221 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2224 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2226 /* The only valid VLAN for a VLAN-unaware bridge is 0 */
2230 return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
/* 802.1D bridge op: FID -> VID mapping for a VLAN-unaware bridge. The
 * body is not visible in this listing; presumably returns 0 — confirm
 * against the full source.
 */
2234 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2235 const struct mlxsw_sp_fid *fid)
/* Operations for VLAN-unaware (802.1D) bridges. */
2240 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2241 .port_join = mlxsw_sp_bridge_8021d_port_join,
2242 .port_leave = mlxsw_sp_bridge_8021d_port_leave,
2243 .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
2244 .fid_get = mlxsw_sp_bridge_8021d_fid_get,
2245 .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
2246 .fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
/* Enslave a port netdev (brport_dev) to a bridge: take a reference on
 * the bridge-port object and run the bridge-type-specific join op. The
 * reference is dropped on the error path.
 */
2249 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2250 struct net_device *brport_dev,
2251 struct net_device *br_dev,
2252 struct netlink_ext_ack *extack)
2254 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2255 struct mlxsw_sp_bridge_device *bridge_device;
2256 struct mlxsw_sp_bridge_port *bridge_port;
2259 bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2261 if (IS_ERR(bridge_port))
2262 return PTR_ERR(bridge_port);
2263 bridge_device = bridge_port->bridge_device;
2265 err = bridge_device->ops->port_join(bridge_device, bridge_port,
2266 mlxsw_sp_port, extack);
2273 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
/* Unslave a port netdev from a bridge: run the type-specific leave op
 * and drop the bridge-port reference taken at join time.
 */
2277 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2278 struct net_device *brport_dev,
2279 struct net_device *br_dev)
2281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2282 struct mlxsw_sp_bridge_device *bridge_device;
2283 struct mlxsw_sp_bridge_port *bridge_port;
2285 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2288 bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2292 bridge_device->ops->port_leave(bridge_device, bridge_port,
2294 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
/* Dispatch a VxLAN join to the bridge-type-specific vxlan_join op. */
2297 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2298 const struct net_device *br_dev,
2299 const struct net_device *vxlan_dev, u16 vid,
2300 struct netlink_ext_ack *extack)
2302 struct mlxsw_sp_bridge_device *bridge_device;
2304 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2305 if (WARN_ON(!bridge_device))
2308 return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
/* Unbind a VxLAN device from its FID: disable NVE on the FID looked up
 * by VNI, then drop both the lookup reference and the one the VxLAN
 * device held.
 */
2312 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2313 const struct net_device *vxlan_dev)
2315 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2316 struct mlxsw_sp_fid *fid;
2318 /* If the VxLAN device is down, then the FID does not have a VNI */
2319 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2323 mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2324 /* Drop both the reference we just took during lookup and the reference
2325 * the VXLAN device took.
2327 mlxsw_sp_fid_put(fid);
2328 mlxsw_sp_fid_put(fid);
/* Convert a VxLAN union address into the driver's l3proto/l3addr pair
 * (IPv4 or IPv6, chosen by the socket address family).
 */
2332 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2333 enum mlxsw_sp_l3proto *proto,
2334 union mlxsw_sp_l3addr *addr)
2336 if (vxlan_addr->sa.sa_family == AF_INET) {
2337 addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2338 *proto = MLXSW_SP_L3_PROTO_IPV4;
2340 addr->addr6 = vxlan_addr->sin6.sin6_addr;
2341 *proto = MLXSW_SP_L3_PROTO_IPV6;
/* Inverse of mlxsw_sp_switchdev_vxlan_addr_convert(): build a VxLAN
 * union address from the driver's l3proto/l3addr pair.
 */
2346 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2347 const union mlxsw_sp_l3addr *addr,
2348 union vxlan_addr *vxlan_addr)
2351 case MLXSW_SP_L3_PROTO_IPV4:
2352 vxlan_addr->sa.sa_family = AF_INET;
2353 vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2355 case MLXSW_SP_L3_PROTO_IPV6:
2356 vxlan_addr->sa.sa_family = AF_INET6;
2357 vxlan_addr->sin6.sin6_addr = addr->addr6;
/* Notify the VxLAN device about an FDB entry the device learned (or
 * aged out) behind a tunnel, so the software FDB stays in sync.
 */
2362 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2364 enum mlxsw_sp_l3proto proto,
2365 union mlxsw_sp_l3addr *addr,
2366 __be32 vni, bool adding)
2368 struct switchdev_notifier_vxlan_fdb_info info;
2369 struct vxlan_dev *vxlan = netdev_priv(dev);
2370 enum switchdev_notifier_type type;
2372 type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2373 SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2374 mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2375 info.remote_port = vxlan->cfg.dst_port;
2376 info.remote_vni = vni;
2377 info.remote_ifindex = 0;
2378 ether_addr_copy(info.eth_addr, mac);
2380 info.offloaded = adding;
2381 call_switchdev_notifiers(type, dev, &info.info, NULL);
/* Dispatch an NVE FDB notification to the tunnel-type-specific helper;
 * only VxLAN devices are handled.
 */
2384 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2386 enum mlxsw_sp_l3proto proto,
2387 union mlxsw_sp_l3addr *addr,
2391 if (netif_is_vxlan(dev))
2392 mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
/* Emit a switchdev FDB notification ({MAC, VID} on dev) towards the
 * bridge, marking whether the entry is offloaded.
 */
2397 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2398 const char *mac, u16 vid,
2399 struct net_device *dev, bool offloaded)
2401 struct switchdev_notifier_fdb_info info;
2405 info.offloaded = offloaded;
2406 call_switchdev_notifiers(type, dev, &info.info, NULL);
/* Process a learned/aged-out MAC notification (SFN record) for a
 * regular port: mirror the entry into the device FDB and notify the
 * bridge. On failure the entry is presumably removed so the device
 * stops re-sending the notification — the error labels are not visible
 * in this listing.
 */
2409 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2410 char *sfn_pl, int rec_index,
2413 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2414 struct mlxsw_sp_bridge_device *bridge_device;
2415 struct mlxsw_sp_bridge_port *bridge_port;
2416 struct mlxsw_sp_port *mlxsw_sp_port;
2417 enum switchdev_notifier_type type;
2421 bool do_notification = true;
2424 mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2425 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2426 if (!mlxsw_sp_port) {
2427 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2431 if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2434 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2435 if (!mlxsw_sp_port_vlan) {
2436 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2440 bridge_port = mlxsw_sp_port_vlan->bridge_port;
2442 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2446 bridge_device = bridge_port->bridge_device;
/* A VLAN-unaware bridge reports VID 0 to the software bridge. */
2447 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2450 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2453 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2457 if (!do_notification)
2459 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2460 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2466 do_notification = false;
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the SFN record is
 * keyed by lag_id; a representative member port is used to resolve the
 * bridge port, and lag_vid is supplied when the FID requires it.
 */
2470 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2471 char *sfn_pl, int rec_index,
2474 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2475 struct mlxsw_sp_bridge_device *bridge_device;
2476 struct mlxsw_sp_bridge_port *bridge_port;
2477 struct mlxsw_sp_port *mlxsw_sp_port;
2478 enum switchdev_notifier_type type;
2483 bool do_notification = true;
2486 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2487 mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2488 if (!mlxsw_sp_port) {
2489 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2493 if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2496 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2497 if (!mlxsw_sp_port_vlan) {
2498 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2502 bridge_port = mlxsw_sp_port_vlan->bridge_port;
2504 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2508 bridge_device = bridge_port->bridge_device;
2509 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2510 lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2511 mlxsw_sp_port_vlan->vid : 0;
2514 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2517 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2521 if (!do_notification)
2523 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2524 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2530 do_notification = false;
/* Resolve the NVE device, bridge VID and VNI for a tunnel FDB
 * notification on the given FID. Learning-related checks (device up,
 * BR_LEARNING on the bridge port, VXLAN_F_LEARN) gate the 'adding'
 * path.
 */
2535 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2536 const struct mlxsw_sp_fid *fid,
2538 struct net_device **nve_dev,
2539 u16 *p_vid, __be32 *p_vni)
2541 struct mlxsw_sp_bridge_device *bridge_device;
2542 struct net_device *br_dev, *dev;
2546 err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2550 err = mlxsw_sp_fid_vni(fid, p_vni);
2554 dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
2559 if (!netif_running(dev))
2562 if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2565 if (adding && netif_is_vxlan(dev)) {
2566 struct vxlan_dev *vxlan = netdev_priv(dev);
2568 if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2572 br_dev = netdev_master_upper_dev_get(dev);
2576 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2580 *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
/* Process a learned/aged-out unicast tunnel MAC notification: resolve
 * the underlay IP and the NVE device, re-program the entry in the
 * device FDB, then notify both the NVE device and the bridge. On any
 * failure the entry is removed from the device so it does not keep
 * re-sending the same notification.
 */
2585 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2590 enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2591 enum switchdev_notifier_type type;
2592 struct net_device *nve_dev;
2593 union mlxsw_sp_l3addr addr;
2594 struct mlxsw_sp_fid *fid;
2601 mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2604 fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2606 goto err_fid_lookup;
2608 err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2609 (enum mlxsw_sp_l3proto) sfn_proto,
2612 goto err_ip_resolve;
2614 err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2615 &nve_dev, &vid, &vni);
2617 goto err_fdb_process;
2619 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2620 (enum mlxsw_sp_l3proto) sfn_proto,
2621 &addr, adding, true);
2625 mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2626 (enum mlxsw_sp_l3proto) sfn_proto,
2627 &addr, vni, adding);
2629 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2630 SWITCHDEV_FDB_DEL_TO_BRIDGE;
2631 mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2633 mlxsw_sp_fid_put(fid);
2640 mlxsw_sp_fid_put(fid);
2642 /* Remove an FDB entry in case we cannot process it. Otherwise the
2643 * device will keep sending the same notification over and over again.
2645 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2646 (enum mlxsw_sp_l3proto) sfn_proto, &addr,
/* Dispatch one SFN record to its handler based on the record type:
 * learned vs. aged-out, per-port vs. per-LAG vs. unicast tunnel.
 */
2650 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2651 char *sfn_pl, int rec_index)
2653 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2654 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2655 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2658 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2659 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2662 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2663 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2666 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2667 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2670 case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2671 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2674 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2675 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
/* (Re)arm the FDB-notification polling delayed work. When 'no_delay'
 * (second parameter, on an elided line) is set, schedule immediately;
 * otherwise wait the configured fdb_notify.interval milliseconds.
 */
2681 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
2684 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2685 unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
2687 mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2688 msecs_to_jiffies(interval));
/* Cap on SFN queries per work invocation, bounding time spent per session. */
2691 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
/* Periodic worker: poll the device's SFN register for FDB notification
 * records, process each record, and reschedule itself. If the budget was
 * exhausted (queries reached 0), reschedule with no delay — there are
 * likely more records pending in the device.
 */
2693 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2695 struct mlxsw_sp_bridge *bridge;
2696 struct mlxsw_sp *mlxsw_sp;
2703 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2707 bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2708 mlxsw_sp = bridge->mlxsw_sp;
2711 queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
2712 while (queries > 0) {
2713 mlxsw_reg_sfn_pack(sfn_pl);
2714 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2716 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2719 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2720 for (i = 0; i < num_rec; i++)
2721 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
/* A short batch means the device queue drained; stop polling early. */
2722 if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
2730 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
/* Deferred-work context for a switchdev FDB event. Captures the notifier
 * payload (bridge fdb_info or vxlan_fdb_info — presumably only the one
 * matching 'event' is valid; confirm against the event handlers), plus the
 * originating netdev and event code, so processing can run outside the
 * atomic notifier context.
 */
2733 struct mlxsw_sp_switchdev_event_work {
2734 struct work_struct work;
2736 struct switchdev_notifier_fdb_info fdb_info;
2737 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2739 struct net_device *dev;
2740 unsigned long event;
/* Program (or unprogram) a unicast FDB entry that the bridge points at a
 * VxLAN device: look up the matching entry in the VxLAN driver's FDB,
 * mirror it into the device's tunnel FDB, and report offload state back
 * via SWITCHDEV_VXLAN_FDB_OFFLOADED / SWITCHDEV_FDB_OFFLOADED notifiers.
 * NOTE(review): excerpt elides return statements, braces and some error
 * handling between the visible lines.
 */
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2745 struct mlxsw_sp_switchdev_event_work *
2747 struct mlxsw_sp_fid *fid, __be32 vni)
2749 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2750 struct switchdev_notifier_fdb_info *fdb_info;
2751 struct net_device *dev = switchdev_work->dev;
2752 enum mlxsw_sp_l3proto proto;
2753 union mlxsw_sp_l3addr addr;
2756 fdb_info = &switchdev_work->fdb_info;
/* Ask the VxLAN driver for the unicast entry matching this MAC and VNI. */
2757 err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2761 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2764 switch (switchdev_work->event) {
2765 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2766 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2767 vxlan_fdb_info.eth_addr,
2768 mlxsw_sp_fid_index(fid),
2769 proto, &addr, true, false);
2772 vxlan_fdb_info.offloaded = true;
2773 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2774 &vxlan_fdb_info.info, NULL);
2775 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2776 vxlan_fdb_info.eth_addr,
2777 fdb_info->vid, dev, true);
2779 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2780 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2781 vxlan_fdb_info.eth_addr,
2782 mlxsw_sp_fid_index(fid),
2783 proto, &addr, false,
/* Deletion clears the offloaded mark before notifying the VxLAN driver. */
2785 vxlan_fdb_info.offloaded = false;
2786 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2787 &vxlan_fdb_info.info, NULL);
/* Filter and route bridge FDB events whose target device is an NVE (VxLAN)
 * netdev: only user-added ADD events and DEL events on a running device
 * enslaved to an offloaded bridge are handled. Resolves the FID from the
 * notified VID and delegates to the VxLAN FDB event handler when the FID
 * has a VNI attached.
 */
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2796 struct mlxsw_sp_bridge_device *bridge_device;
2797 struct net_device *dev = switchdev_work->dev;
2798 struct net_device *br_dev;
2799 struct mlxsw_sp *mlxsw_sp;
2800 struct mlxsw_sp_fid *fid;
2804 if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2805 switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
/* Ignore ADDs that the bridge learned itself; only user-configured
 * static entries are mirrored to hardware here.
 */
2808 if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2809 !switchdev_work->fdb_info.added_by_user)
2812 if (!netif_running(dev))
2814 br_dev = netdev_master_upper_dev_get(dev);
2817 if (!netif_is_bridge_master(br_dev))
2819 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2822 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2826 fid = bridge_device->ops->fid_lookup(bridge_device,
2827 switchdev_work->fdb_info.vid);
2831 err = mlxsw_sp_fid_vni(fid, &vni);
2835 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2839 mlxsw_sp_fid_put(fid);
/* Process-context worker for bridge FDB notifier events. VxLAN targets are
 * diverted to the NVE path; for physical lowers, ADD/DEL events are written
 * to the device FDB and successful ADDs are acknowledged with
 * SWITCHDEV_FDB_OFFLOADED. Frees the copied MAC buffer and the work item
 * before returning.
 */
2842 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2844 struct mlxsw_sp_switchdev_event_work *switchdev_work =
2845 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2846 struct net_device *dev = switchdev_work->dev;
2847 struct switchdev_notifier_fdb_info *fdb_info;
2848 struct mlxsw_sp_port *mlxsw_sp_port;
2852 if (netif_is_vxlan(dev)) {
2853 mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2857 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2861 switch (switchdev_work->event) {
2862 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2863 fdb_info = &switchdev_work->fdb_info;
2864 if (!fdb_info->added_by_user)
2866 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2869 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2871 fdb_info->vid, dev, true);
2873 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2874 fdb_info = &switchdev_work->fdb_info;
/* Deletion result is intentionally not checked (best-effort removal). */
2875 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2877 case SWITCHDEV_FDB_ADD_TO_BRIDGE:
2878 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2879 /* These events are only used to potentially update an existing
2885 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2889 kfree(switchdev_work->fdb_info.addr);
2890 kfree(switchdev_work);
/* Handle SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: install the VxLAN FDB entry in
 * hardware. The all-zeros MAC denotes the flood entry and is translated to
 * an NVE flood-IP add; unicast entries are programmed only when the
 * bridge's FDB actually points the MAC at this VxLAN device. Offload state
 * is reported back through switchdev notifiers.
 */
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2896 struct mlxsw_sp_switchdev_event_work *
2899 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2900 struct mlxsw_sp_bridge_device *bridge_device;
2901 struct net_device *dev = switchdev_work->dev;
2902 u8 all_zeros_mac[ETH_ALEN] = { 0 };
2903 enum mlxsw_sp_l3proto proto;
2904 union mlxsw_sp_l3addr addr;
2905 struct net_device *br_dev;
2906 struct mlxsw_sp_fid *fid;
2910 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2911 br_dev = netdev_master_upper_dev_get(dev);
2913 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2917 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2921 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
/* All-zeros MAC: this is the default (flood) entry, not a unicast one. */
2924 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2925 err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2927 mlxsw_sp_fid_put(fid);
2930 vxlan_fdb_info->offloaded = true;
2931 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2932 &vxlan_fdb_info->info, NULL);
2933 mlxsw_sp_fid_put(fid);
2937 /* The device has a single FDB table, whereas Linux has two - one
2938 * in the bridge driver and another in the VxLAN driver. We only
2939 * program an entry to the device if the MAC points to the VxLAN
2940 * device in the bridge's FDB table
2942 vid = bridge_device->ops->fid_vid(bridge_device, fid);
2943 if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
2944 goto err_br_fdb_find;
2946 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2947 mlxsw_sp_fid_index(fid), proto,
2948 &addr, true, false);
2950 goto err_fdb_tunnel_uc_op;
2951 vxlan_fdb_info->offloaded = true;
2952 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2953 &vxlan_fdb_info->info, NULL);
2954 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2955 vxlan_fdb_info->eth_addr, vid, dev, true);
2957 mlxsw_sp_fid_put(fid);
/* Error paths release the FID reference taken by the VNI lookup. */
2961 err_fdb_tunnel_uc_op:
2963 mlxsw_sp_fid_put(fid);
/* Handle SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: remove the corresponding
 * hardware entry. All-zeros MAC maps to an NVE flood-IP delete; unicast
 * MACs are removed from the device tunnel FDB and the un-offload is
 * reported to the bridge via SWITCHDEV_FDB_OFFLOADED with adding=false.
 */
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
2968 struct mlxsw_sp_switchdev_event_work *
2971 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2972 struct mlxsw_sp_bridge_device *bridge_device;
2973 struct net_device *dev = switchdev_work->dev;
2974 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2975 u8 all_zeros_mac[ETH_ALEN] = { 0 };
2976 enum mlxsw_sp_l3proto proto;
2977 union mlxsw_sp_l3addr addr;
2978 struct mlxsw_sp_fid *fid;
2981 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2983 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2987 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2991 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
2994 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2995 mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
2996 mlxsw_sp_fid_put(fid);
/* Best-effort removal; the return value is deliberately ignored. */
3000 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3001 mlxsw_sp_fid_index(fid), proto, &addr,
3003 vid = bridge_device->ops->fid_vid(bridge_device, fid);
3004 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3005 vxlan_fdb_info->eth_addr, vid, dev, false);
3007 mlxsw_sp_fid_put(fid);
/* Process-context worker for VxLAN FDB notifier events. Re-validates that
 * the device is still running and enslaved to an offloaded bridge (state
 * may have changed since the event was queued), then dispatches to the
 * add/del handler. Always frees the work item.
 */
3010 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3012 struct mlxsw_sp_switchdev_event_work *switchdev_work =
3013 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3014 struct net_device *dev = switchdev_work->dev;
3015 struct mlxsw_sp *mlxsw_sp;
3016 struct net_device *br_dev;
3020 if (!netif_running(dev))
3022 br_dev = netdev_master_upper_dev_get(dev);
3025 if (!netif_is_bridge_master(br_dev))
3027 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3031 switch (switchdev_work->event) {
3032 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3033 mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3035 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3036 mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3042 kfree(switchdev_work);
/* Validate a VxLAN FDB notification against offload constraints before
 * queueing deferred work: non-default remote port/VNI, a local interface,
 * multicast MAC or multicast remote IP are all rejected with an extack
 * message. On success the notifier payload is copied into the work item.
 */
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3049 struct switchdev_notifier_info *info)
3051 struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3052 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3053 struct vxlan_config *cfg = &vxlan->cfg;
3054 struct netlink_ext_ack *extack;
3056 extack = switchdev_notifier_info_to_extack(info);
3057 vxlan_fdb_info = container_of(info,
3058 struct switchdev_notifier_vxlan_fdb_info,
3061 if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3062 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3065 if (vxlan_fdb_info->remote_vni != cfg->vni ||
3066 vxlan_fdb_info->vni != cfg->vni) {
3067 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3070 if (vxlan_fdb_info->remote_ifindex) {
3071 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3074 if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3075 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3078 if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3079 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
/* Struct copy — the notifier's payload must outlive the atomic context. */
3083 switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier entry point. PORT_ATTR_SET is handled inline;
 * FDB events are copied into a freshly allocated work item (GFP_ATOMIC)
 * and deferred to process context, since hardware programming may sleep.
 * Events for devices not bridged above an mlxsw port are ignored.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3090 unsigned long event, void *ptr)
3092 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3093 struct mlxsw_sp_switchdev_event_work *switchdev_work;
3094 struct switchdev_notifier_fdb_info *fdb_info;
3095 struct switchdev_notifier_info *info = ptr;
3096 struct net_device *br_dev;
3099 if (event == SWITCHDEV_PORT_ATTR_SET) {
3100 err = switchdev_handle_port_attr_set(dev, ptr,
3101 mlxsw_sp_port_dev_check,
3102 mlxsw_sp_port_attr_set);
3103 return notifier_from_errno(err);
3106 /* Tunnel devices are not our uppers, so check their master instead */
3107 br_dev = netdev_master_upper_dev_get_rcu(dev);
3110 if (!netif_is_bridge_master(br_dev))
3112 if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3115 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3116 if (!switchdev_work)
3119 switchdev_work->dev = dev;
3120 switchdev_work->event = event;
3123 case SWITCHDEV_FDB_ADD_TO_DEVICE:
3124 case SWITCHDEV_FDB_DEL_TO_DEVICE:
3125 case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3126 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3127 fdb_info = container_of(info,
3128 struct switchdev_notifier_fdb_info,
3130 INIT_WORK(&switchdev_work->work,
3131 mlxsw_sp_switchdev_bridge_fdb_event_work);
/* Deep-copy the fdb_info: the notifier's MAC buffer is not ours to keep. */
3132 memcpy(&switchdev_work->fdb_info, ptr,
3133 sizeof(switchdev_work->fdb_info));
3134 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3135 if (!switchdev_work->fdb_info.addr)
3136 goto err_addr_alloc;
3137 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3139 /* Take a reference on the device. This can be either
3140 * upper device containig mlxsw_sp_port or just a
3145 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3146 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3147 INIT_WORK(&switchdev_work->work,
3148 mlxsw_sp_switchdev_vxlan_fdb_event_work);
3149 err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3152 goto err_vxlan_work_prepare;
/* Unrecognized events: free the work item (default branch, elided). */
3156 kfree(switchdev_work);
3160 mlxsw_core_schedule_work(&switchdev_work->work);
3164 err_vxlan_work_prepare:
3166 kfree(switchdev_work);
/* Atomic switchdev notifier block; registered in mlxsw_sp_fdb_init(). */
3170 struct notifier_block mlxsw_sp_switchdev_notifier = {
3171 .notifier_call = mlxsw_sp_switchdev_event,
/* Handle a VLAN being added on a VxLAN device enslaved to a VLAN-aware
 * bridge. Depending on whether the VNI already has a FID and on the
 * PVID/untagged flags, this either joins the VxLAN device to the 802.1Q
 * bridge mapping, leaves the current mapping, remaps to the new VLAN, or
 * does nothing — the five numbered cases are documented inline below.
 */
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3176 struct mlxsw_sp_bridge_device *bridge_device,
3177 const struct net_device *vxlan_dev, u16 vid,
3178 bool flag_untagged, bool flag_pvid,
3179 struct netlink_ext_ack *extack)
3181 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3182 __be32 vni = vxlan->cfg.vni;
3183 struct mlxsw_sp_fid *fid;
3187 /* We cannot have the same VLAN as PVID and egress untagged on multiple
3188 * VxLAN devices. Note that we get this notification before the VLAN is
3189 * actually added to the bridge's database, so it is not possible for
3190 * the lookup function to return 'vxlan_dev'
3192 if (flag_untagged && flag_pvid &&
3193 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3194 NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3198 if (!netif_running(vxlan_dev))
3201 /* First case: FID is not associated with this VNI, but the new VLAN
3202 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3205 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3207 if (!flag_untagged || !flag_pvid)
3209 return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
3210 vxlan_dev, vid, extack);
3213 /* Second case: FID is associated with the VNI and the VLAN associated
3214 * with the FID is the same as the notified VLAN. This means the flags
3215 * (PVID / egress untagged) were toggled and that NVE should be
3216 * disabled on the FID
3218 old_vid = mlxsw_sp_fid_8021q_vid(fid);
3219 if (vid == old_vid) {
3220 if (WARN_ON(flag_untagged && flag_pvid)) {
3221 mlxsw_sp_fid_put(fid);
3224 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3225 mlxsw_sp_fid_put(fid);
3229 /* Third case: A new VLAN was configured on the VxLAN device, but this
3230 * VLAN is not PVID, so there is nothing to do.
3233 mlxsw_sp_fid_put(fid);
3237 /* Fourth case: Thew new VLAN is PVID, which means the VLAN currently
3238 * mapped to the VNI should be unmapped
3240 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3241 mlxsw_sp_fid_put(fid);
3243 /* Fifth case: The new VLAN is also egress untagged, which means the
3244 * VLAN needs to be mapped to the VNI
3249 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
3252 goto err_vxlan_join;
/* Rollback: on join failure, restore the previous VLAN<->VNI mapping. */
3257 mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
/* Handle a VLAN deletion on a VxLAN device: if the deleted VLAN is the one
 * currently mapped to the device's VNI, detach the VxLAN device from the
 * bridge mapping. Other VLANs are ignored.
 */
mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3264 struct mlxsw_sp_bridge_device *bridge_device,
3265 const struct net_device *vxlan_dev, u16 vid)
3267 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3268 __be32 vni = vxlan->cfg.vni;
3269 struct mlxsw_sp_fid *fid;
3271 if (!netif_running(vxlan_dev))
3274 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3278 /* A different VLAN than the one mapped to the VNI is deleted */
3279 if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3282 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3285 mlxsw_sp_fid_put(fid);
/* Blocking-notifier handler for PORT_OBJ_ADD of VLANs on a VxLAN device.
 * Applies mlxsw_sp_switchdev_vxlan_vlan_add() per VID in the notified
 * range; marks the notification handled only when the bridge above is
 * offloaded by this driver. Acts on the commit phase of the transaction.
 */
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3290 struct switchdev_notifier_port_obj_info *
3293 struct switchdev_obj_port_vlan *vlan =
3294 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3295 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3296 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3297 struct switchdev_trans *trans = port_obj_info->trans;
3298 struct mlxsw_sp_bridge_device *bridge_device;
3299 struct netlink_ext_ack *extack;
3300 struct mlxsw_sp *mlxsw_sp;
3301 struct net_device *br_dev;
3304 extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3305 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3309 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3313 port_obj_info->handled = true;
/* Only the commit phase programs hardware; prepare phase is a no-op here. */
3315 if (switchdev_trans_ph_commit(trans))
3318 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* VLAN mapping on VxLAN only makes sense for a VLAN-aware bridge. */
3322 if (!bridge_device->vlan_enabled)
3325 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
3328 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
/* Blocking-notifier handler for PORT_OBJ_DEL of VLANs on a VxLAN device.
 * Mirrors the add path: applies mlxsw_sp_switchdev_vxlan_vlan_del() per
 * VID in the notified range on VLAN-aware offloaded bridges.
 */
mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3341 struct switchdev_notifier_port_obj_info *
3344 struct switchdev_obj_port_vlan *vlan =
3345 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3346 struct mlxsw_sp_bridge_device *bridge_device;
3347 struct mlxsw_sp *mlxsw_sp;
3348 struct net_device *br_dev;
3351 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3355 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3359 port_obj_info->handled = true;
3361 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3365 if (!bridge_device->vlan_enabled)
3368 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3369 mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
/* Route a switchdev object-add on a VxLAN device by object id; only
 * PORT_VLAN objects are handled (VLAN additions).
 */
mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3375 struct switchdev_notifier_port_obj_info *
3380 switch (port_obj_info->obj->id) {
3381 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3382 err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
/* Route a switchdev object-del on a VxLAN device by object id; only
 * PORT_VLAN objects are handled (VLAN deletions).
 */
mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3394 struct switchdev_notifier_port_obj_info *
3397 switch (port_obj_info->obj->id) {
3398 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3399 mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
/* Blocking (process-context) switchdev notifier entry point. VxLAN devices
 * get dedicated object add/del handling; everything else is dispatched to
 * the per-port obj/attr handlers via the generic switchdev helpers, which
 * walk lower devices using mlxsw_sp_port_dev_check.
 */
3406 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3407 unsigned long event, void *ptr)
3409 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3413 case SWITCHDEV_PORT_OBJ_ADD:
3414 if (netif_is_vxlan(dev))
3415 err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3417 err = switchdev_handle_port_obj_add(dev, ptr,
3418 mlxsw_sp_port_dev_check,
3419 mlxsw_sp_port_obj_add);
3420 return notifier_from_errno(err);
3421 case SWITCHDEV_PORT_OBJ_DEL:
3422 if (netif_is_vxlan(dev))
3423 mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3425 err = switchdev_handle_port_obj_del(dev, ptr,
3426 mlxsw_sp_port_dev_check,
3427 mlxsw_sp_port_obj_del);
3428 return notifier_from_errno(err);
3429 case SWITCHDEV_PORT_ATTR_SET:
3430 err = switchdev_handle_port_attr_set(dev, ptr,
3431 mlxsw_sp_port_dev_check,
3432 mlxsw_sp_port_attr_set);
3433 return notifier_from_errno(err);
/* Blocking switchdev notifier block; registered in mlxsw_sp_fdb_init(). */
3439 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3440 .notifier_call = mlxsw_sp_switchdev_blocking_event,
/* Trivial accessor: return the cached STP state of a bridge port. */
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3446 return bridge_port->stp_state;
/* FDB subsystem init: set the default ageing time in hardware, register
 * both (atomic and blocking) switchdev notifiers, and kick off the
 * periodic FDB-notification polling work. Uses goto-unwind so a failure
 * in the blocking registration rolls back the atomic one.
 */
3449 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3451 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3452 struct notifier_block *nb;
3455 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3457 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3461 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3463 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3467 nb = &mlxsw_sp_switchdev_blocking_notifier;
3468 err = register_switchdev_blocking_notifier(nb);
3470 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3471 goto err_register_switchdev_blocking_notifier;
3474 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3475 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3476 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
3479 err_register_switchdev_blocking_notifier:
3480 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
/* FDB subsystem teardown: stop the polling work (synchronously, so it is
 * not running afterwards) and unregister both notifiers in reverse order
 * of registration.
 */
3484 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3486 struct notifier_block *nb;
3488 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3490 nb = &mlxsw_sp_switchdev_blocking_notifier;
3491 unregister_switchdev_blocking_notifier(nb);
3493 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
/* Allocate and wire up the per-ASIC bridge state (bridge list, 802.1Q and
 * 802.1D bridge ops), then bring up the FDB subsystem. Returns 0 or a
 * negative errno from mlxsw_sp_fdb_init().
 */
3496 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3498 struct mlxsw_sp_bridge *bridge;
3500 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3503 mlxsw_sp->bridge = bridge;
3504 bridge->mlxsw_sp = mlxsw_sp;
3506 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3508 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3509 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3511 return mlxsw_sp_fdb_init(mlxsw_sp);
3514 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3516 mlxsw_sp_fdb_fini(mlxsw_sp);
3517 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3518 kfree(mlxsw_sp->bridge);