1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
22 for (i = 0; i < ds->num_ports; ++i) {
23 struct dsa_port *dp = dsa_to_port(ds, i);
25 if (dp->ageing_time && dp->ageing_time < ageing_time)
26 ageing_time = dp->ageing_time;
32 static int dsa_switch_ageing_time(struct dsa_switch *ds,
33 struct dsa_notifier_ageing_time_info *info)
35 unsigned int ageing_time = info->ageing_time;
37 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
40 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
43 /* Program the fastest ageing time in case of multiple bridges */
44 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
46 if (ds->ops->set_ageing_time)
47 return ds->ops->set_ageing_time(ds, ageing_time);
52 static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
53 struct dsa_notifier_mtu_info *info)
55 if (ds->index == info->sw_index && port == info->port)
58 /* Do not propagate to other switches in the tree if the notifier was
59 * targeted for a single switch.
61 if (info->targeted_match)
64 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
70 static int dsa_switch_mtu(struct dsa_switch *ds,
71 struct dsa_notifier_mtu_info *info)
75 if (!ds->ops->port_change_mtu)
78 for (port = 0; port < ds->num_ports; port++) {
79 if (dsa_switch_mtu_match(ds, port, info)) {
80 ret = ds->ops->port_change_mtu(ds, port, info->mtu);
89 static int dsa_switch_bridge_join(struct dsa_switch *ds,
90 struct dsa_notifier_bridge_info *info)
92 struct dsa_switch_tree *dst = ds->dst;
95 if (dst->index == info->tree_index && ds->index == info->sw_index) {
96 if (!ds->ops->port_bridge_join)
99 err = ds->ops->port_bridge_join(ds, info->port, info->br);
104 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
105 ds->ops->crosschip_bridge_join) {
106 err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
108 info->port, info->br);
113 return dsa_tag_8021q_bridge_join(ds, info);
116 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
117 struct dsa_notifier_bridge_info *info)
119 bool unset_vlan_filtering = br_vlan_enabled(info->br);
120 struct dsa_switch_tree *dst = ds->dst;
121 struct netlink_ext_ack extack = {0};
124 if (dst->index == info->tree_index && ds->index == info->sw_index &&
125 ds->ops->port_bridge_leave)
126 ds->ops->port_bridge_leave(ds, info->port, info->br);
128 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
129 ds->ops->crosschip_bridge_leave)
130 ds->ops->crosschip_bridge_leave(ds, info->tree_index,
131 info->sw_index, info->port,
134 /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
135 * event for changing vlan_filtering setting upon slave ports leaving
136 * it. That is a good thing, because that lets us handle it and also
137 * handle the case where the switch's vlan_filtering setting is global
138 * (not per port). When that happens, the correct moment to trigger the
139 * vlan_filtering callback is only when the last port leaves the last
142 if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
143 for (port = 0; port < ds->num_ports; port++) {
144 struct net_device *bridge_dev;
146 bridge_dev = dsa_to_port(ds, port)->bridge_dev;
148 if (bridge_dev && br_vlan_enabled(bridge_dev)) {
149 unset_vlan_filtering = false;
154 if (unset_vlan_filtering) {
155 err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
158 dev_err(ds->dev, "port %d: %s\n", info->port,
160 if (err && err != EOPNOTSUPP)
164 return dsa_tag_8021q_bridge_leave(ds, info);
167 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
168 * DSA links) that sit between the targeted port on which the notifier was
169 * emitted and its dedicated CPU port.
171 static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
172 int info_sw_index, int info_port)
174 struct dsa_port *targeted_dp, *cpu_dp;
175 struct dsa_switch *targeted_ds;
177 targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
178 targeted_dp = dsa_to_port(targeted_ds, info_port);
179 cpu_dp = targeted_dp->cpu_dp;
181 if (dsa_switch_is_upstream_of(ds, targeted_ds))
182 return port == dsa_towards_port(ds, cpu_dp->ds->index,
188 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
189 const unsigned char *addr,
192 struct dsa_mac_addr *a;
194 list_for_each_entry(a, addr_list, list)
195 if (ether_addr_equal(a->addr, addr) && a->vid == vid)
201 static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
202 const struct switchdev_obj_port_mdb *mdb)
204 struct dsa_port *dp = dsa_to_port(ds, port);
205 struct dsa_mac_addr *a;
208 /* No need to bother with refcounting for user ports */
209 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
210 return ds->ops->port_mdb_add(ds, port, mdb);
212 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
214 refcount_inc(&a->refcount);
218 a = kzalloc(sizeof(*a), GFP_KERNEL);
222 err = ds->ops->port_mdb_add(ds, port, mdb);
228 ether_addr_copy(a->addr, mdb->addr);
230 refcount_set(&a->refcount, 1);
231 list_add_tail(&a->list, &dp->mdbs);
236 static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
237 const struct switchdev_obj_port_mdb *mdb)
239 struct dsa_port *dp = dsa_to_port(ds, port);
240 struct dsa_mac_addr *a;
243 /* No need to bother with refcounting for user ports */
244 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
245 return ds->ops->port_mdb_del(ds, port, mdb);
247 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
251 if (!refcount_dec_and_test(&a->refcount))
254 err = ds->ops->port_mdb_del(ds, port, mdb);
256 refcount_inc(&a->refcount);
266 static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
267 const unsigned char *addr, u16 vid)
269 struct dsa_port *dp = dsa_to_port(ds, port);
270 struct dsa_mac_addr *a;
273 /* No need to bother with refcounting for user ports */
274 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
275 return ds->ops->port_fdb_add(ds, port, addr, vid);
277 a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
279 refcount_inc(&a->refcount);
283 a = kzalloc(sizeof(*a), GFP_KERNEL);
287 err = ds->ops->port_fdb_add(ds, port, addr, vid);
293 ether_addr_copy(a->addr, addr);
295 refcount_set(&a->refcount, 1);
296 list_add_tail(&a->list, &dp->fdbs);
301 static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
302 const unsigned char *addr, u16 vid)
304 struct dsa_port *dp = dsa_to_port(ds, port);
305 struct dsa_mac_addr *a;
308 /* No need to bother with refcounting for user ports */
309 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
310 return ds->ops->port_fdb_del(ds, port, addr, vid);
312 a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
316 if (!refcount_dec_and_test(&a->refcount))
319 err = ds->ops->port_fdb_del(ds, port, addr, vid);
321 refcount_inc(&a->refcount);
331 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
332 struct dsa_notifier_fdb_info *info)
337 if (!ds->ops->port_fdb_add)
340 for (port = 0; port < ds->num_ports; port++) {
341 if (dsa_switch_host_address_match(ds, port, info->sw_index,
343 err = dsa_switch_do_fdb_add(ds, port, info->addr,
353 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
354 struct dsa_notifier_fdb_info *info)
359 if (!ds->ops->port_fdb_del)
362 for (port = 0; port < ds->num_ports; port++) {
363 if (dsa_switch_host_address_match(ds, port, info->sw_index,
365 err = dsa_switch_do_fdb_del(ds, port, info->addr,
375 static int dsa_switch_fdb_add(struct dsa_switch *ds,
376 struct dsa_notifier_fdb_info *info)
378 int port = dsa_towards_port(ds, info->sw_index, info->port);
380 if (!ds->ops->port_fdb_add)
383 return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
386 static int dsa_switch_fdb_del(struct dsa_switch *ds,
387 struct dsa_notifier_fdb_info *info)
389 int port = dsa_towards_port(ds, info->sw_index, info->port);
391 if (!ds->ops->port_fdb_del)
394 return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
397 static int dsa_switch_hsr_join(struct dsa_switch *ds,
398 struct dsa_notifier_hsr_info *info)
400 if (ds->index == info->sw_index && ds->ops->port_hsr_join)
401 return ds->ops->port_hsr_join(ds, info->port, info->hsr);
406 static int dsa_switch_hsr_leave(struct dsa_switch *ds,
407 struct dsa_notifier_hsr_info *info)
409 if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
410 return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
415 static int dsa_switch_lag_change(struct dsa_switch *ds,
416 struct dsa_notifier_lag_info *info)
418 if (ds->index == info->sw_index && ds->ops->port_lag_change)
419 return ds->ops->port_lag_change(ds, info->port);
421 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
422 return ds->ops->crosschip_lag_change(ds, info->sw_index,
428 static int dsa_switch_lag_join(struct dsa_switch *ds,
429 struct dsa_notifier_lag_info *info)
431 if (ds->index == info->sw_index && ds->ops->port_lag_join)
432 return ds->ops->port_lag_join(ds, info->port, info->lag,
435 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
436 return ds->ops->crosschip_lag_join(ds, info->sw_index,
437 info->port, info->lag,
443 static int dsa_switch_lag_leave(struct dsa_switch *ds,
444 struct dsa_notifier_lag_info *info)
446 if (ds->index == info->sw_index && ds->ops->port_lag_leave)
447 return ds->ops->port_lag_leave(ds, info->port, info->lag);
449 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
450 return ds->ops->crosschip_lag_leave(ds, info->sw_index,
451 info->port, info->lag);
456 static int dsa_switch_mdb_add(struct dsa_switch *ds,
457 struct dsa_notifier_mdb_info *info)
459 int port = dsa_towards_port(ds, info->sw_index, info->port);
461 if (!ds->ops->port_mdb_add)
464 return dsa_switch_do_mdb_add(ds, port, info->mdb);
467 static int dsa_switch_mdb_del(struct dsa_switch *ds,
468 struct dsa_notifier_mdb_info *info)
470 int port = dsa_towards_port(ds, info->sw_index, info->port);
472 if (!ds->ops->port_mdb_del)
475 return dsa_switch_do_mdb_del(ds, port, info->mdb);
478 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
479 struct dsa_notifier_mdb_info *info)
484 if (!ds->ops->port_mdb_add)
487 for (port = 0; port < ds->num_ports; port++) {
488 if (dsa_switch_host_address_match(ds, port, info->sw_index,
490 err = dsa_switch_do_mdb_add(ds, port, info->mdb);
499 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
500 struct dsa_notifier_mdb_info *info)
505 if (!ds->ops->port_mdb_del)
508 for (port = 0; port < ds->num_ports; port++) {
509 if (dsa_switch_host_address_match(ds, port, info->sw_index,
511 err = dsa_switch_do_mdb_del(ds, port, info->mdb);
520 static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
521 struct dsa_notifier_vlan_info *info)
523 if (ds->index == info->sw_index && port == info->port)
526 if (dsa_is_dsa_port(ds, port))
532 static int dsa_switch_vlan_add(struct dsa_switch *ds,
533 struct dsa_notifier_vlan_info *info)
537 if (!ds->ops->port_vlan_add)
540 for (port = 0; port < ds->num_ports; port++) {
541 if (dsa_switch_vlan_match(ds, port, info)) {
542 err = ds->ops->port_vlan_add(ds, port, info->vlan,
552 static int dsa_switch_vlan_del(struct dsa_switch *ds,
553 struct dsa_notifier_vlan_info *info)
555 if (!ds->ops->port_vlan_del)
558 if (ds->index == info->sw_index)
559 return ds->ops->port_vlan_del(ds, info->port, info->vlan);
561 /* Do not deprogram the DSA links as they may be used as conduit
562 * for other VLAN members in the fabric.
567 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
568 struct dsa_notifier_tag_proto_info *info)
570 const struct dsa_device_ops *tag_ops = info->tag_ops;
573 if (!ds->ops->change_tag_protocol)
578 for (port = 0; port < ds->num_ports; port++) {
579 if (!dsa_is_cpu_port(ds, port))
582 err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
586 dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
589 /* Now that changing the tag protocol can no longer fail, let's update
590 * the remaining bits which are "duplicated for faster access", and the
591 * bits that depend on the tagger, such as the MTU.
593 for (port = 0; port < ds->num_ports; port++) {
594 if (dsa_is_user_port(ds, port)) {
595 struct net_device *slave;
597 slave = dsa_to_port(ds, port)->slave;
598 dsa_slave_setup_tagger(slave);
600 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
601 dsa_slave_change_mtu(slave, slave->mtu);
608 static int dsa_switch_mrp_add(struct dsa_switch *ds,
609 struct dsa_notifier_mrp_info *info)
611 if (!ds->ops->port_mrp_add)
614 if (ds->index == info->sw_index)
615 return ds->ops->port_mrp_add(ds, info->port, info->mrp);
620 static int dsa_switch_mrp_del(struct dsa_switch *ds,
621 struct dsa_notifier_mrp_info *info)
623 if (!ds->ops->port_mrp_del)
626 if (ds->index == info->sw_index)
627 return ds->ops->port_mrp_del(ds, info->port, info->mrp);
633 dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
634 struct dsa_notifier_mrp_ring_role_info *info)
636 if (!ds->ops->port_mrp_add)
639 if (ds->index == info->sw_index)
640 return ds->ops->port_mrp_add_ring_role(ds, info->port,
647 dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
648 struct dsa_notifier_mrp_ring_role_info *info)
650 if (!ds->ops->port_mrp_del)
653 if (ds->index == info->sw_index)
654 return ds->ops->port_mrp_del_ring_role(ds, info->port,
660 static int dsa_switch_event(struct notifier_block *nb,
661 unsigned long event, void *info)
663 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
667 case DSA_NOTIFIER_AGEING_TIME:
668 err = dsa_switch_ageing_time(ds, info);
670 case DSA_NOTIFIER_BRIDGE_JOIN:
671 err = dsa_switch_bridge_join(ds, info);
673 case DSA_NOTIFIER_BRIDGE_LEAVE:
674 err = dsa_switch_bridge_leave(ds, info);
676 case DSA_NOTIFIER_FDB_ADD:
677 err = dsa_switch_fdb_add(ds, info);
679 case DSA_NOTIFIER_FDB_DEL:
680 err = dsa_switch_fdb_del(ds, info);
682 case DSA_NOTIFIER_HOST_FDB_ADD:
683 err = dsa_switch_host_fdb_add(ds, info);
685 case DSA_NOTIFIER_HOST_FDB_DEL:
686 err = dsa_switch_host_fdb_del(ds, info);
688 case DSA_NOTIFIER_HSR_JOIN:
689 err = dsa_switch_hsr_join(ds, info);
691 case DSA_NOTIFIER_HSR_LEAVE:
692 err = dsa_switch_hsr_leave(ds, info);
694 case DSA_NOTIFIER_LAG_CHANGE:
695 err = dsa_switch_lag_change(ds, info);
697 case DSA_NOTIFIER_LAG_JOIN:
698 err = dsa_switch_lag_join(ds, info);
700 case DSA_NOTIFIER_LAG_LEAVE:
701 err = dsa_switch_lag_leave(ds, info);
703 case DSA_NOTIFIER_MDB_ADD:
704 err = dsa_switch_mdb_add(ds, info);
706 case DSA_NOTIFIER_MDB_DEL:
707 err = dsa_switch_mdb_del(ds, info);
709 case DSA_NOTIFIER_HOST_MDB_ADD:
710 err = dsa_switch_host_mdb_add(ds, info);
712 case DSA_NOTIFIER_HOST_MDB_DEL:
713 err = dsa_switch_host_mdb_del(ds, info);
715 case DSA_NOTIFIER_VLAN_ADD:
716 err = dsa_switch_vlan_add(ds, info);
718 case DSA_NOTIFIER_VLAN_DEL:
719 err = dsa_switch_vlan_del(ds, info);
721 case DSA_NOTIFIER_MTU:
722 err = dsa_switch_mtu(ds, info);
724 case DSA_NOTIFIER_TAG_PROTO:
725 err = dsa_switch_change_tag_proto(ds, info);
727 case DSA_NOTIFIER_MRP_ADD:
728 err = dsa_switch_mrp_add(ds, info);
730 case DSA_NOTIFIER_MRP_DEL:
731 err = dsa_switch_mrp_del(ds, info);
733 case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
734 err = dsa_switch_mrp_add_ring_role(ds, info);
736 case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
737 err = dsa_switch_mrp_del_ring_role(ds, info);
739 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
740 err = dsa_switch_tag_8021q_vlan_add(ds, info);
742 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
743 err = dsa_switch_tag_8021q_vlan_del(ds, info);
751 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
754 return notifier_from_errno(err);
757 int dsa_switch_register_notifier(struct dsa_switch *ds)
759 ds->nb.notifier_call = dsa_switch_event;
761 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
764 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
768 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
770 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);