1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Handling of a single switch chip, part of a switch fabric
5 * Copyright (c) 2017 Savoir-faire Linux Inc.
6 * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
/* Return the fastest (smallest non-zero) ageing time requested by any port of
 * @ds, using @ageing_time as the initial upper bound.
 * NOTE(review): this view of the file is elided; the final return statement is
 * not visible here.
 */
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
/* Scan every port; a zero dp->ageing_time means "not configured" and is
 * skipped.
 */
22 dsa_switch_for_each_port(dp, ds)
23 if (dp->ageing_time && dp->ageing_time < ageing_time)
24 ageing_time = dp->ageing_time;
/* Handle DSA_NOTIFIER_AGEING_TIME: validate the requested FDB ageing time
 * against the switch's supported min/max (the rejection return values are
 * elided from this view — presumably range errors), then program the fastest
 * ageing time across all bridges if the driver implements set_ageing_time.
 */
29 static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 struct dsa_notifier_ageing_time_info *info)
32 unsigned int ageing_time = info->ageing_time;
/* A zero ageing_time_min/max means the driver declared no bound. */
34 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
37 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
40 /* Program the fastest ageing time in case of multiple bridges */
41 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
/* Optional op: drivers without hardware ageing simply skip programming. */
43 if (ds->ops->set_ageing_time)
44 return ds->ops->set_ageing_time(ds, ageing_time);
/* Decide whether @dp should have its MTU changed for this notifier: the
 * targeted port itself always matches; otherwise DSA and CPU ports match so
 * the new MTU propagates along the fabric, unless the notifier was targeted
 * at a single switch (targeted_match).
 */
49 static bool dsa_port_mtu_match(struct dsa_port *dp,
50 struct dsa_notifier_mtu_info *info)
/* Exact match on the port the notifier was emitted for. */
52 if (dp->ds->index == info->sw_index && dp->index == info->port)
55 /* Do not propagate to other switches in the tree if the notifier was
56 * targeted for a single switch.
58 if (info->targeted_match)
/* DSA links and CPU ports carry traffic for the targeted port, so they
 * need at least the same MTU.
 */
61 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
/* Handle DSA_NOTIFIER_MTU: apply the new MTU to every port of @ds that
 * dsa_port_mtu_match() selects. Returns -EOPNOTSUPP-style early exit if the
 * driver lacks port_change_mtu (exact value elided from this view).
 */
67 static int dsa_switch_mtu(struct dsa_switch *ds,
68 struct dsa_notifier_mtu_info *info)
73 if (!ds->ops->port_change_mtu)
76 dsa_switch_for_each_port(dp, ds) {
77 if (dsa_port_mtu_match(dp, info)) {
/* Error handling after this call is elided in this view. */
78 ret = ds->ops->port_change_mtu(ds, dp->index,
/* Handle DSA_NOTIFIER_BRIDGE_JOIN: if the event targets a port on this very
 * switch, call port_bridge_join; if it targets a port on another switch (or
 * another tree), give the driver a chance to react via crosschip_bridge_join.
 * Finally let the tag_8021q infrastructure update its VLANs for the join.
 */
88 static int dsa_switch_bridge_join(struct dsa_switch *ds,
89 struct dsa_notifier_bridge_info *info)
91 struct dsa_switch_tree *dst = ds->dst;
/* Event is for a port local to this switch. */
94 if (dst->index == info->tree_index && ds->index == info->sw_index) {
95 if (!ds->ops->port_bridge_join)
98 err = ds->ops->port_bridge_join(ds, info->port, info->br);
/* Event is for a foreign switch: cross-chip notification, optional op. */
103 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
104 ds->ops->crosschip_bridge_join) {
105 err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
107 info->port, info->br);
112 return dsa_tag_8021q_bridge_join(ds, info);
/* Handle DSA_NOTIFIER_BRIDGE_LEAVE: mirror of dsa_switch_bridge_join, plus
 * logic to restore the port's VLAN filtering state when it becomes standalone.
 * When the switch's vlan_filtering is global, the change is deferred until no
 * remaining port is under a vlan_filtering bridge.
 */
115 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
116 struct dsa_notifier_bridge_info *info)
118 struct dsa_switch_tree *dst = ds->dst;
119 struct netlink_ext_ack extack = {0};
120 bool change_vlan_filtering = false;
/* Local port leaving: notify the driver (return value intentionally not
 * checked here).
 */
125 if (dst->index == info->tree_index && ds->index == info->sw_index &&
126 ds->ops->port_bridge_leave)
127 ds->ops->port_bridge_leave(ds, info->port, info->br);
/* Port on another switch/tree leaving: cross-chip notification. */
129 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
130 ds->ops->crosschip_bridge_leave)
131 ds->ops->crosschip_bridge_leave(ds, info->tree_index,
132 info->sw_index, info->port,
/* Standalone ports must be vlan_filtering on switches that need it;
 * otherwise inherit-from-bridge behavior means filtering must be turned
 * off if the bridge had it on.
 */
135 if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) {
136 change_vlan_filtering = true;
137 vlan_filtering = true;
138 } else if (!ds->needs_standalone_vlan_filtering &&
139 br_vlan_enabled(info->br)) {
140 change_vlan_filtering = true;
141 vlan_filtering = false;
144 /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
145 * event for changing vlan_filtering setting upon slave ports leaving
146 * it. That is a good thing, because that lets us handle it and also
147 * handle the case where the switch's vlan_filtering setting is global
148 * (not per port). When that happens, the correct moment to trigger the
149 * vlan_filtering callback is only when the last port leaves the last
152 if (change_vlan_filtering && ds->vlan_filtering_is_global) {
153 dsa_switch_for_each_port(dp, ds) {
154 struct net_device *bridge_dev;
156 bridge_dev = dp->bridge_dev;
/* Another port is still under a vlan_filtering bridge:
 * cannot flip the global knob yet.
 */
158 if (bridge_dev && br_vlan_enabled(bridge_dev)) {
159 change_vlan_filtering = false;
165 if (change_vlan_filtering) {
166 err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
167 vlan_filtering, &extack);
/* extack message forwarded to the kernel log since there is no
 * netlink requester at this point.
 */
169 dev_err(ds->dev, "port %d: %s\n", info->port,
/* -EOPNOTSUPP is tolerated; anything else is a hard error. */
171 if (err && err != -EOPNOTSUPP)
175 return dsa_tag_8021q_bridge_leave(ds, info);
178 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
179 * DSA links) that sit between the targeted port on which the notifier was
180 * emitted and its dedicated CPU port.
182 static bool dsa_port_host_address_match(struct dsa_port *dp,
183 int info_sw_index, int info_port)
185 struct dsa_port *targeted_dp, *cpu_dp;
186 struct dsa_switch *targeted_ds;
/* Resolve the switch/port the notifier was originally emitted for, and
 * the CPU port that serves it.
 */
188 targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
189 targeted_dp = dsa_to_port(targeted_ds, info_port);
190 cpu_dp = targeted_dp->cpu_dp;
/* Only switches on the path towards the CPU can match; the matching port
 * is the one routing towards that CPU port's switch.
 */
192 if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
193 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
/* Linear search of @addr_list for an entry matching both @addr and @vid.
 * Returns the entry, or (per the elided tail of this function, presumably)
 * NULL when not found. Caller must hold the list's addr_lists_lock.
 */
199 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
200 const unsigned char *addr,
203 struct dsa_mac_addr *a;
205 list_for_each_entry(a, addr_list, list)
206 if (ether_addr_equal(a->addr, addr) && a->vid == vid)
/* Install @mdb on @dp. User ports get a direct, unrefcounted driver call;
 * shared (CPU/DSA) ports keep a refcounted dp->mdbs list so the same address
 * can be requested by multiple user ports and only programmed once.
 */
212 static int dsa_port_do_mdb_add(struct dsa_port *dp,
213 const struct switchdev_obj_port_mdb *mdb)
215 struct dsa_switch *ds = dp->ds;
216 struct dsa_mac_addr *a;
217 int port = dp->index;
220 /* No need to bother with refcounting for user ports */
221 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
222 return ds->ops->port_mdb_add(ds, port, mdb);
224 mutex_lock(&dp->addr_lists_lock);
/* Already present: just take another reference. */
226 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
228 refcount_inc(&a->refcount);
/* First user: allocate tracking entry (OOM path elided in this view)... */
232 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* ...program the hardware (error path elided)... */
238 err = ds->ops->port_mdb_add(ds, port, mdb);
/* ...and record the address with an initial refcount of 1. */
244 ether_addr_copy(a->addr, mdb->addr);
246 refcount_set(&a->refcount, 1);
247 list_add_tail(&a->list, &dp->mdbs);
250 mutex_unlock(&dp->addr_lists_lock);
/* Remove @mdb from @dp. User ports call the driver directly; shared ports
 * drop a reference on the dp->mdbs entry and only deprogram the hardware
 * when the last reference goes away.
 */
255 static int dsa_port_do_mdb_del(struct dsa_port *dp,
256 const struct switchdev_obj_port_mdb *mdb)
258 struct dsa_switch *ds = dp->ds;
259 struct dsa_mac_addr *a;
260 int port = dp->index;
263 /* No need to bother with refcounting for user ports */
264 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
265 return ds->ops->port_mdb_del(ds, port, mdb);
267 mutex_lock(&dp->addr_lists_lock);
/* Not-found handling is elided in this view. */
269 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
/* Other users remain: keep the hardware entry. */
275 if (!refcount_dec_and_test(&a->refcount))
278 err = ds->ops->port_mdb_del(ds, port, mdb);
/* Hardware delete failed: restore the refcount so state stays consistent. */
280 refcount_set(&a->refcount, 1);
288 mutex_unlock(&dp->addr_lists_lock);
/* Install an FDB entry (@addr, @vid) on @dp. Same structure as
 * dsa_port_do_mdb_add: direct driver call for user ports, refcounted
 * dp->fdbs list for shared (CPU/DSA) ports.
 */
293 static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
296 struct dsa_switch *ds = dp->ds;
297 struct dsa_mac_addr *a;
298 int port = dp->index;
301 /* No need to bother with refcounting for user ports */
302 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
303 return ds->ops->port_fdb_add(ds, port, addr, vid);
305 mutex_lock(&dp->addr_lists_lock);
/* Already tracked: just bump the reference. */
307 a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
309 refcount_inc(&a->refcount);
/* First user: allocate (OOM path elided), program hardware (error path
 * elided), then record with refcount 1.
 */
313 a = kzalloc(sizeof(*a), GFP_KERNEL);
319 err = ds->ops->port_fdb_add(ds, port, addr, vid);
325 ether_addr_copy(a->addr, addr);
327 refcount_set(&a->refcount, 1);
328 list_add_tail(&a->list, &dp->fdbs);
331 mutex_unlock(&dp->addr_lists_lock);
/* Remove an FDB entry (@addr, @vid) from @dp. Mirrors dsa_port_do_mdb_del:
 * only the last reference on a shared port actually deprograms the hardware.
 */
336 static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
339 struct dsa_switch *ds = dp->ds;
340 struct dsa_mac_addr *a;
341 int port = dp->index;
344 /* No need to bother with refcounting for user ports */
345 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
346 return ds->ops->port_fdb_del(ds, port, addr, vid);
348 mutex_lock(&dp->addr_lists_lock);
/* Not-found handling is elided in this view. */
350 a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
/* Still referenced by another user: keep it. */
356 if (!refcount_dec_and_test(&a->refcount))
359 err = ds->ops->port_fdb_del(ds, port, addr, vid);
/* Hardware delete failed: restore the refcount. */
361 refcount_set(&a->refcount, 1);
369 mutex_unlock(&dp->addr_lists_lock);
/* Handle DSA_NOTIFIER_HOST_FDB_ADD: install the address on every
 * upstream-facing port (CPU port / upstream DSA links) between the targeted
 * port and its CPU port, so host-terminated traffic reaches the CPU.
 */
374 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
375 struct dsa_notifier_fdb_info *info)
380 if (!ds->ops->port_fdb_add)
383 dsa_switch_for_each_port(dp, ds) {
384 if (dsa_port_host_address_match(dp, info->sw_index,
/* Error handling after this call is elided in this view. */
386 err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
/* Handle DSA_NOTIFIER_HOST_FDB_DEL: remove the host address from every
 * upstream-facing port selected by dsa_port_host_address_match().
 */
395 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
396 struct dsa_notifier_fdb_info *info)
401 if (!ds->ops->port_fdb_del)
404 dsa_switch_for_each_port(dp, ds) {
405 if (dsa_port_host_address_match(dp, info->sw_index,
/* Error handling after this call is elided in this view. */
407 err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
/* Handle DSA_NOTIFIER_FDB_ADD on this switch: resolve which local port
 * routes towards the targeted (switch, port) — the port itself if local,
 * otherwise the DSA link towards it — and install the FDB entry there.
 */
416 static int dsa_switch_fdb_add(struct dsa_switch *ds,
417 struct dsa_notifier_fdb_info *info)
419 int port = dsa_towards_port(ds, info->sw_index, info->port);
420 struct dsa_port *dp = dsa_to_port(ds, port);
422 if (!ds->ops->port_fdb_add)
425 return dsa_port_do_fdb_add(dp, info->addr, info->vid);
/* Handle DSA_NOTIFIER_FDB_DEL on this switch: mirror of dsa_switch_fdb_add,
 * removing the entry from the port routing towards the targeted port.
 */
428 static int dsa_switch_fdb_del(struct dsa_switch *ds,
429 struct dsa_notifier_fdb_info *info)
431 int port = dsa_towards_port(ds, info->sw_index, info->port);
432 struct dsa_port *dp = dsa_to_port(ds, port);
434 if (!ds->ops->port_fdb_del)
437 return dsa_port_do_fdb_del(dp, info->addr, info->vid);
/* Handle DSA_NOTIFIER_HSR_JOIN: only the targeted switch reacts, and only if
 * the driver implements port_hsr_join (fallthrough return elided).
 */
440 static int dsa_switch_hsr_join(struct dsa_switch *ds,
441 struct dsa_notifier_hsr_info *info)
443 if (ds->index == info->sw_index && ds->ops->port_hsr_join)
444 return ds->ops->port_hsr_join(ds, info->port, info->hsr);
/* Handle DSA_NOTIFIER_HSR_LEAVE: mirror of dsa_switch_hsr_join. */
449 static int dsa_switch_hsr_leave(struct dsa_switch *ds,
450 struct dsa_notifier_hsr_info *info)
452 if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
453 return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
/* Handle DSA_NOTIFIER_LAG_CHANGE: the targeted switch gets port_lag_change,
 * every other switch gets the cross-chip variant — both optional ops.
 */
458 static int dsa_switch_lag_change(struct dsa_switch *ds,
459 struct dsa_notifier_lag_info *info)
461 if (ds->index == info->sw_index && ds->ops->port_lag_change)
462 return ds->ops->port_lag_change(ds, info->port);
464 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
465 return ds->ops->crosschip_lag_change(ds, info->sw_index,
/* Handle DSA_NOTIFIER_LAG_JOIN: local switch via port_lag_join, foreign
 * switches via crosschip_lag_join (both optional ops).
 */
471 static int dsa_switch_lag_join(struct dsa_switch *ds,
472 struct dsa_notifier_lag_info *info)
474 if (ds->index == info->sw_index && ds->ops->port_lag_join)
475 return ds->ops->port_lag_join(ds, info->port, info->lag,
478 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
479 return ds->ops->crosschip_lag_join(ds, info->sw_index,
480 info->port, info->lag,
/* Handle DSA_NOTIFIER_LAG_LEAVE: mirror of dsa_switch_lag_join. */
486 static int dsa_switch_lag_leave(struct dsa_switch *ds,
487 struct dsa_notifier_lag_info *info)
489 if (ds->index == info->sw_index && ds->ops->port_lag_leave)
490 return ds->ops->port_lag_leave(ds, info->port, info->lag);
492 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
493 return ds->ops->crosschip_lag_leave(ds, info->sw_index,
494 info->port, info->lag);
/* Handle DSA_NOTIFIER_MDB_ADD on this switch: install the MDB entry on the
 * local port routing towards the targeted (switch, port).
 */
499 static int dsa_switch_mdb_add(struct dsa_switch *ds,
500 struct dsa_notifier_mdb_info *info)
502 int port = dsa_towards_port(ds, info->sw_index, info->port);
503 struct dsa_port *dp = dsa_to_port(ds, port);
505 if (!ds->ops->port_mdb_add)
508 return dsa_port_do_mdb_add(dp, info->mdb);
/* Handle DSA_NOTIFIER_MDB_DEL on this switch: mirror of dsa_switch_mdb_add. */
511 static int dsa_switch_mdb_del(struct dsa_switch *ds,
512 struct dsa_notifier_mdb_info *info)
514 int port = dsa_towards_port(ds, info->sw_index, info->port);
515 struct dsa_port *dp = dsa_to_port(ds, port);
517 if (!ds->ops->port_mdb_del)
520 return dsa_port_do_mdb_del(dp, info->mdb);
/* Handle DSA_NOTIFIER_HOST_MDB_ADD: install the MDB entry on all
 * upstream-facing ports between the targeted port and its CPU port.
 */
523 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
524 struct dsa_notifier_mdb_info *info)
529 if (!ds->ops->port_mdb_add)
532 dsa_switch_for_each_port(dp, ds) {
533 if (dsa_port_host_address_match(dp, info->sw_index,
/* Error handling after this call is elided in this view. */
535 err = dsa_port_do_mdb_add(dp, info->mdb);
/* Handle DSA_NOTIFIER_HOST_MDB_DEL: mirror of dsa_switch_host_mdb_add. */
544 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
545 struct dsa_notifier_mdb_info *info)
550 if (!ds->ops->port_mdb_del)
553 dsa_switch_for_each_port(dp, ds) {
554 if (dsa_port_host_address_match(dp, info->sw_index,
/* Error handling after this call is elided in this view. */
556 err = dsa_port_do_mdb_del(dp, info->mdb);
/* Decide whether @dp should receive the VLAN from this notifier: the
 * targeted port itself matches, and DSA links match so the VLAN can transit
 * the fabric (return values elided from this view).
 */
565 static bool dsa_port_vlan_match(struct dsa_port *dp,
566 struct dsa_notifier_vlan_info *info)
568 if (dp->ds->index == info->sw_index && dp->index == info->port)
571 if (dsa_port_is_dsa(dp))
/* Handle DSA_NOTIFIER_VLAN_ADD: program the VLAN on every port of @ds that
 * dsa_port_vlan_match() selects (targeted port plus DSA links).
 */
577 static int dsa_switch_vlan_add(struct dsa_switch *ds,
578 struct dsa_notifier_vlan_info *info)
583 if (!ds->ops->port_vlan_add)
586 dsa_switch_for_each_port(dp, ds) {
587 if (dsa_port_vlan_match(dp, info)) {
/* Error handling after this call is elided in this view. */
588 err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
/* Handle DSA_NOTIFIER_VLAN_DEL: deliberately asymmetric with vlan_add —
 * only the targeted switch's own port is deprogrammed, never the DSA links.
 */
598 static int dsa_switch_vlan_del(struct dsa_switch *ds,
599 struct dsa_notifier_vlan_info *info)
601 if (!ds->ops->port_vlan_del)
604 if (ds->index == info->sw_index)
605 return ds->ops->port_vlan_del(ds, info->port, info->vlan);
607 /* Do not deprogram the DSA links as they may be used as conduit
608 * for other VLAN members in the fabric.
/* Handle DSA_NOTIFIER_TAG_PROTO: switch every CPU port of @ds to the new
 * tagging protocol, then refresh the per-slave tagger state and MTU, which
 * both depend on the tagger's overhead.
 */
613 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
614 struct dsa_notifier_tag_proto_info *info)
616 const struct dsa_device_ops *tag_ops = info->tag_ops;
617 struct dsa_port *dp, *cpu_dp;
/* Mandatory op for this notifier: bail out early if unimplemented. */
620 if (!ds->ops->change_tag_protocol)
/* Phase 1: ask the driver to switch each CPU port (error path elided),
 * then record the new tag_ops on the port.
 */
625 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
626 err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
631 dsa_port_set_tag_protocol(cpu_dp, tag_ops);
634 /* Now that changing the tag protocol can no longer fail, let's update
635 * the remaining bits which are "duplicated for faster access", and the
636 * bits that depend on the tagger, such as the MTU.
638 dsa_switch_for_each_user_port(dp, ds) {
639 struct net_device *slave = dp->slave;
641 dsa_slave_setup_tagger(slave);
643 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
644 dsa_slave_change_mtu(slave, slave->mtu);
/* Handle DSA_NOTIFIER_MRP_ADD: only the targeted switch reacts, and only if
 * the driver implements port_mrp_add (fallthrough return elided).
 */
650 static int dsa_switch_mrp_add(struct dsa_switch *ds,
651 struct dsa_notifier_mrp_info *info)
653 if (!ds->ops->port_mrp_add)
656 if (ds->index == info->sw_index)
657 return ds->ops->port_mrp_add(ds, info->port, info->mrp);
/* Handle DSA_NOTIFIER_MRP_DEL: mirror of dsa_switch_mrp_add. */
662 static int dsa_switch_mrp_del(struct dsa_switch *ds,
663 struct dsa_notifier_mrp_info *info)
665 if (!ds->ops->port_mrp_del)
668 if (ds->index == info->sw_index)
669 return ds->ops->port_mrp_del(ds, info->port, info->mrp);
/* Handle DSA_NOTIFIER_MRP_ADD_RING_ROLE for the targeted switch.
 * NOTE(review): the capability gate checks ds->ops->port_mrp_add but the
 * call is to port_mrp_add_ring_role — presumably intentional coupling of the
 * two ops, but worth confirming the driver contract.
 */
675 dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
676 struct dsa_notifier_mrp_ring_role_info *info)
678 if (!ds->ops->port_mrp_add)
681 if (ds->index == info->sw_index)
682 return ds->ops->port_mrp_add_ring_role(ds, info->port,
/* Handle DSA_NOTIFIER_MRP_DEL_RING_ROLE: mirror of the add variant.
 * NOTE(review): gates on ds->ops->port_mrp_del while calling
 * port_mrp_del_ring_role — same coupling as the add path; confirm.
 */
689 dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
690 struct dsa_notifier_mrp_ring_role_info *info)
692 if (!ds->ops->port_mrp_del)
695 if (ds->index == info->sw_index)
696 return ds->ops->port_mrp_del_ring_role(ds, info->port,
/* Notifier callback for every switch in the fabric: demultiplex the DSA
 * event to its per-event handler, log a debug message when a handler fails
 * (an error stops propagation along the chain), and translate the errno into
 * notifier-chain semantics.
 */
702 static int dsa_switch_event(struct notifier_block *nb,
703 unsigned long event, void *info)
/* Recover the dsa_switch embedding this notifier_block. */
705 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
709 case DSA_NOTIFIER_AGEING_TIME:
710 err = dsa_switch_ageing_time(ds, info);
712 case DSA_NOTIFIER_BRIDGE_JOIN:
713 err = dsa_switch_bridge_join(ds, info);
715 case DSA_NOTIFIER_BRIDGE_LEAVE:
716 err = dsa_switch_bridge_leave(ds, info);
718 case DSA_NOTIFIER_FDB_ADD:
719 err = dsa_switch_fdb_add(ds, info);
721 case DSA_NOTIFIER_FDB_DEL:
722 err = dsa_switch_fdb_del(ds, info);
724 case DSA_NOTIFIER_HOST_FDB_ADD:
725 err = dsa_switch_host_fdb_add(ds, info);
727 case DSA_NOTIFIER_HOST_FDB_DEL:
728 err = dsa_switch_host_fdb_del(ds, info);
730 case DSA_NOTIFIER_HSR_JOIN:
731 err = dsa_switch_hsr_join(ds, info);
733 case DSA_NOTIFIER_HSR_LEAVE:
734 err = dsa_switch_hsr_leave(ds, info);
736 case DSA_NOTIFIER_LAG_CHANGE:
737 err = dsa_switch_lag_change(ds, info);
739 case DSA_NOTIFIER_LAG_JOIN:
740 err = dsa_switch_lag_join(ds, info);
742 case DSA_NOTIFIER_LAG_LEAVE:
743 err = dsa_switch_lag_leave(ds, info);
745 case DSA_NOTIFIER_MDB_ADD:
746 err = dsa_switch_mdb_add(ds, info);
748 case DSA_NOTIFIER_MDB_DEL:
749 err = dsa_switch_mdb_del(ds, info);
751 case DSA_NOTIFIER_HOST_MDB_ADD:
752 err = dsa_switch_host_mdb_add(ds, info);
754 case DSA_NOTIFIER_HOST_MDB_DEL:
755 err = dsa_switch_host_mdb_del(ds, info);
757 case DSA_NOTIFIER_VLAN_ADD:
758 err = dsa_switch_vlan_add(ds, info);
760 case DSA_NOTIFIER_VLAN_DEL:
761 err = dsa_switch_vlan_del(ds, info);
763 case DSA_NOTIFIER_MTU:
764 err = dsa_switch_mtu(ds, info);
766 case DSA_NOTIFIER_TAG_PROTO:
767 err = dsa_switch_change_tag_proto(ds, info);
769 case DSA_NOTIFIER_MRP_ADD:
770 err = dsa_switch_mrp_add(ds, info);
772 case DSA_NOTIFIER_MRP_DEL:
773 err = dsa_switch_mrp_del(ds, info);
775 case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
776 err = dsa_switch_mrp_add_ring_role(ds, info);
778 case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
779 err = dsa_switch_mrp_del_ring_role(ds, info);
781 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
782 err = dsa_switch_tag_8021q_vlan_add(ds, info);
784 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
785 err = dsa_switch_tag_8021q_vlan_del(ds, info);
/* Failure here aborts the notifier chain for the remaining switches. */
793 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
796 return notifier_from_errno(err);
/* Register @ds on its tree's raw notifier chain so dsa_switch_event() sees
 * all fabric-wide events. Returns 0 or a negative errno.
 */
799 int dsa_switch_register_notifier(struct dsa_switch *ds)
801 ds->nb.notifier_call = dsa_switch_event;
803 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
/* Unregister @ds from its tree's notifier chain; failure is only logged
 * since callers (teardown paths) cannot act on it.
 */
806 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
810 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
812 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);