1 // SPDX-License-Identifier: GPL-2.0+
3 #include <linux/if_bridge.h>
4 #include <net/switchdev.h>
6 #include "lan966x_main.h"
/* Forward declarations: these notifier blocks are referenced by
 * switchdev_bridge_port_offload() in lan966x_port_bridge_join() before
 * their .notifier_call handlers are defined at the bottom of this file.
 */
static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
12 static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
15 struct lan966x *lan966x = port->lan966x;
18 flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
19 flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
21 /* If mcast snooping is not enabled then use mcast flood mask
22 * to decide to enable multicast flooding or not.
24 if (!port->mcast_ena) {
27 flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
28 flood_mask = ANA_PGID_PGID_GET(flood_mask);
30 if (flood_mask & BIT(port->chip_port))
31 flood_mask_ip |= BIT(port->chip_port);
33 flood_mask_ip &= ~BIT(port->chip_port);
35 flood_mask_ip &= ~BIT(port->chip_port);
38 lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
40 lan966x, ANA_PGID(pgid_ip));
43 static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
46 u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
48 val = ANA_PGID_PGID_GET(val);
50 val |= BIT(port->chip_port);
52 val &= ~BIT(port->chip_port);
54 lan_rmw(ANA_PGID_PGID_SET(val),
56 port->lan966x, ANA_PGID(PGID_MC));
58 if (!port->mcast_ena) {
59 lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
60 lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
64 static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
67 u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
69 val = ANA_PGID_PGID_GET(val);
71 val |= BIT(port->chip_port);
73 val &= ~BIT(port->chip_port);
75 lan_rmw(ANA_PGID_PGID_SET(val),
77 port->lan966x, ANA_PGID(PGID_UC));
80 static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
83 u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
85 val = ANA_PGID_PGID_GET(val);
87 val |= BIT(port->chip_port);
89 val &= ~BIT(port->chip_port);
91 lan_rmw(ANA_PGID_PGID_SET(val),
93 port->lan966x, ANA_PGID(PGID_BC));
96 static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
98 lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
99 ANA_PORT_CFG_LEARN_ENA,
100 port->lan966x, ANA_PORT_CFG(port->chip_port));
102 port->learn_ena = enabled;
105 static void lan966x_port_bridge_flags(struct lan966x_port *port,
106 struct switchdev_brport_flags flags)
108 if (flags.mask & BR_MCAST_FLOOD)
109 lan966x_port_set_mcast_flood(port,
110 !!(flags.val & BR_MCAST_FLOOD));
112 if (flags.mask & BR_FLOOD)
113 lan966x_port_set_ucast_flood(port,
114 !!(flags.val & BR_FLOOD));
116 if (flags.mask & BR_BCAST_FLOOD)
117 lan966x_port_set_bcast_flood(port,
118 !!(flags.val & BR_BCAST_FLOOD));
120 if (flags.mask & BR_LEARNING)
121 lan966x_port_set_learning(port,
122 !!(flags.val & BR_LEARNING));
125 static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
126 struct switchdev_brport_flags flags)
128 if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
135 static void lan966x_update_fwd_mask(struct lan966x *lan966x)
139 for (i = 0; i < lan966x->num_phys_ports; i++) {
140 struct lan966x_port *port = lan966x->ports[i];
141 unsigned long mask = 0;
143 if (port && lan966x->bridge_fwd_mask & BIT(i))
144 mask = lan966x->bridge_fwd_mask & ~BIT(i);
146 mask |= BIT(CPU_PORT);
148 lan_wr(ANA_PGID_PGID_SET(mask),
149 lan966x, ANA_PGID(PGID_SRC + i));
153 static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
155 struct lan966x *lan966x = port->lan966x;
156 bool learn_ena = false;
158 if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
162 if (state == BR_STATE_FORWARDING)
163 lan966x->bridge_fwd_mask |= BIT(port->chip_port);
165 lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
167 lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
168 ANA_PORT_CFG_LEARN_ENA,
169 lan966x, ANA_PORT_CFG(port->chip_port));
171 lan966x_update_fwd_mask(lan966x);
174 static void lan966x_port_ageing_set(struct lan966x_port *port,
175 unsigned long ageing_clock_t)
177 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
178 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
180 lan966x_mac_set_ageing(port->lan966x, ageing_time);
183 static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
185 struct lan966x *lan966x = port->lan966x;
187 port->mcast_ena = mcast_ena;
189 lan966x_mdb_restore_entries(lan966x);
191 lan966x_mdb_clear_entries(lan966x);
193 lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
194 ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
195 ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
196 ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
197 ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
198 ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
199 lan966x, ANA_CPU_FWD_CFG(port->chip_port));
201 lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
202 lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
205 static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
206 const struct switchdev_attr *attr,
207 struct netlink_ext_ack *extack)
209 struct lan966x_port *port = netdev_priv(dev);
212 if (ctx && ctx != port)
216 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
217 lan966x_port_bridge_flags(port, attr->u.brport_flags);
219 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
220 err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
222 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
223 lan966x_port_stp_state_set(port, attr->u.stp_state);
225 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
226 lan966x_port_ageing_set(port, attr->u.ageing_time);
228 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
229 lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
230 lan966x_vlan_port_apply(port);
232 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
233 lan966x_port_mc_set(port, !attr->u.mc_disabled);
243 static int lan966x_port_bridge_join(struct lan966x_port *port,
244 struct net_device *bridge,
245 struct netlink_ext_ack *extack)
247 struct switchdev_brport_flags flags = {0};
248 struct lan966x *lan966x = port->lan966x;
249 struct net_device *dev = port->dev;
252 if (!lan966x->bridge_mask) {
253 lan966x->bridge = bridge;
255 if (lan966x->bridge != bridge) {
256 NL_SET_ERR_MSG_MOD(extack, "Not allow to add port to different bridge");
261 err = switchdev_bridge_port_offload(dev, dev, port,
262 &lan966x_switchdev_nb,
263 &lan966x_switchdev_blocking_nb,
268 lan966x->bridge_mask |= BIT(port->chip_port);
270 flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
271 flags.val = flags.mask;
272 lan966x_port_bridge_flags(port, flags);
277 static void lan966x_port_bridge_leave(struct lan966x_port *port,
278 struct net_device *bridge)
280 struct switchdev_brport_flags flags = {0};
281 struct lan966x *lan966x = port->lan966x;
283 flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
284 flags.val = flags.mask & ~BR_LEARNING;
285 lan966x_port_bridge_flags(port, flags);
287 lan966x->bridge_mask &= ~BIT(port->chip_port);
289 if (!lan966x->bridge_mask)
290 lan966x->bridge = NULL;
292 /* Set the port back to host mode */
293 lan966x_vlan_port_set_vlan_aware(port, false);
294 lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
295 lan966x_vlan_port_apply(port);
298 static int lan966x_port_changeupper(struct net_device *dev,
299 struct netdev_notifier_changeupper_info *info)
301 struct lan966x_port *port = netdev_priv(dev);
302 struct netlink_ext_ack *extack;
305 extack = netdev_notifier_info_to_extack(&info->info);
307 if (netif_is_bridge_master(info->upper_dev)) {
309 err = lan966x_port_bridge_join(port, info->upper_dev,
312 lan966x_port_bridge_leave(port, info->upper_dev);
318 static int lan966x_port_prechangeupper(struct net_device *dev,
319 struct netdev_notifier_changeupper_info *info)
321 struct lan966x_port *port = netdev_priv(dev);
323 if (netif_is_bridge_master(info->upper_dev) && !info->linking)
324 switchdev_bridge_port_unoffload(port->dev, port,
330 static int lan966x_foreign_bridging_check(struct net_device *bridge,
331 struct netlink_ext_ack *extack)
333 struct lan966x *lan966x = NULL;
334 bool has_foreign = false;
335 struct net_device *dev;
336 struct list_head *iter;
338 if (!netif_is_bridge_master(bridge))
341 netdev_for_each_lower_dev(bridge, dev, iter) {
342 if (lan966x_netdevice_check(dev)) {
343 struct lan966x_port *port = netdev_priv(dev);
346 /* Bridge already has at least one port of a
347 * lan966x switch inside it, check that it's
348 * the same instance of the driver.
350 if (port->lan966x != lan966x) {
351 NL_SET_ERR_MSG_MOD(extack,
352 "Bridging between multiple lan966x switches disallowed");
356 /* This is the first lan966x port inside this
359 lan966x = port->lan966x;
365 if (lan966x && has_foreign) {
366 NL_SET_ERR_MSG_MOD(extack,
367 "Bridging lan966x ports with foreign interfaces disallowed");
375 static int lan966x_bridge_check(struct net_device *dev,
376 struct netdev_notifier_changeupper_info *info)
378 return lan966x_foreign_bridging_check(info->upper_dev,
382 static int lan966x_netdevice_port_event(struct net_device *dev,
383 struct notifier_block *nb,
384 unsigned long event, void *ptr)
388 if (!lan966x_netdevice_check(dev)) {
389 if (event == NETDEV_CHANGEUPPER)
390 return lan966x_bridge_check(dev, ptr);
395 case NETDEV_PRECHANGEUPPER:
396 err = lan966x_port_prechangeupper(dev, ptr);
398 case NETDEV_CHANGEUPPER:
399 err = lan966x_bridge_check(dev, ptr);
403 err = lan966x_port_changeupper(dev, ptr);
/* Netdevice notifier entry point; translates errno to notifier codes. */
static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}
421 /* We don't offload uppers such as LAG as bridge ports, so every device except
422 * the bridge itself is foreign.
424 static bool lan966x_foreign_dev_check(const struct net_device *dev,
425 const struct net_device *foreign_dev)
427 struct lan966x_port *port = netdev_priv(dev);
428 struct lan966x *lan966x = port->lan966x;
430 if (netif_is_bridge_master(foreign_dev))
431 if (lan966x->bridge == foreign_dev)
437 static int lan966x_switchdev_event(struct notifier_block *nb,
438 unsigned long event, void *ptr)
440 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
444 case SWITCHDEV_PORT_ATTR_SET:
445 err = switchdev_handle_port_attr_set(dev, ptr,
446 lan966x_netdevice_check,
447 lan966x_port_attr_set);
448 return notifier_from_errno(err);
449 case SWITCHDEV_FDB_ADD_TO_DEVICE:
450 case SWITCHDEV_FDB_DEL_TO_DEVICE:
451 err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
452 lan966x_netdevice_check,
453 lan966x_foreign_dev_check,
455 return notifier_from_errno(err);
461 static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
462 const struct switchdev_obj *obj)
464 const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
465 struct lan966x *lan966x = port->lan966x;
467 if (!netif_is_bridge_master(obj->orig_dev))
468 lan966x_vlan_port_add_vlan(port, v->vid,
469 v->flags & BRIDGE_VLAN_INFO_PVID,
470 v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
472 lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
477 static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
478 const struct switchdev_obj *obj,
479 struct netlink_ext_ack *extack)
481 struct lan966x_port *port = netdev_priv(dev);
484 if (ctx && ctx != port)
488 case SWITCHDEV_OBJ_ID_PORT_VLAN:
489 err = lan966x_handle_port_vlan_add(port, obj);
491 case SWITCHDEV_OBJ_ID_PORT_MDB:
492 case SWITCHDEV_OBJ_ID_HOST_MDB:
493 err = lan966x_handle_port_mdb_add(port, obj);
503 static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
504 const struct switchdev_obj *obj)
506 const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
507 struct lan966x *lan966x = port->lan966x;
509 if (!netif_is_bridge_master(obj->orig_dev))
510 lan966x_vlan_port_del_vlan(port, v->vid);
512 lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
517 static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
518 const struct switchdev_obj *obj)
520 struct lan966x_port *port = netdev_priv(dev);
523 if (ctx && ctx != port)
527 case SWITCHDEV_OBJ_ID_PORT_VLAN:
528 err = lan966x_handle_port_vlan_del(port, obj);
530 case SWITCHDEV_OBJ_ID_PORT_MDB:
531 case SWITCHDEV_OBJ_ID_HOST_MDB:
532 err = lan966x_handle_port_mdb_del(port, obj);
542 static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
546 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
550 case SWITCHDEV_PORT_OBJ_ADD:
551 err = switchdev_handle_port_obj_add(dev, ptr,
552 lan966x_netdevice_check,
553 lan966x_handle_port_obj_add);
554 return notifier_from_errno(err);
555 case SWITCHDEV_PORT_OBJ_DEL:
556 err = switchdev_handle_port_obj_del(dev, ptr,
557 lan966x_netdevice_check,
558 lan966x_handle_port_obj_del);
559 return notifier_from_errno(err);
560 case SWITCHDEV_PORT_ATTR_SET:
561 err = switchdev_handle_port_attr_set(dev, ptr,
562 lan966x_netdevice_check,
563 lan966x_port_attr_set);
564 return notifier_from_errno(err);
570 static struct notifier_block lan966x_netdevice_nb __read_mostly = {
571 .notifier_call = lan966x_netdevice_event,
574 static struct notifier_block lan966x_switchdev_nb __read_mostly = {
575 .notifier_call = lan966x_switchdev_event,
578 static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
579 .notifier_call = lan966x_switchdev_blocking_event,
582 void lan966x_register_notifier_blocks(void)
584 register_netdevice_notifier(&lan966x_netdevice_nb);
585 register_switchdev_notifier(&lan966x_switchdev_nb);
586 register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
589 void lan966x_unregister_notifier_blocks(void)
591 unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
592 unregister_switchdev_notifier(&lan966x_switchdev_nb);
593 unregister_netdevice_notifier(&lan966x_netdevice_nb);