1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
/* Return the smallest per-port ageing time configured on @ds, using
 * @ageing_time as the initial upper bound. Ports whose ageing_time is 0
 * (unset) are ignored.
 */
20 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
21 unsigned int ageing_time)
25 dsa_switch_for_each_port(dp, ds)
26 if (dp->ageing_time && dp->ageing_time < ageing_time)
27 ageing_time = dp->ageing_time;
/* Notifier handler: validate the requested ageing time against the
 * switch's advertised min/max, then program the fastest ageing time seen
 * across all ports (a switch has a single ageing timer shared by all
 * bridges it participates in).
 */
32 static int dsa_switch_ageing_time(struct dsa_switch *ds,
33 struct dsa_notifier_ageing_time_info *info)
35 unsigned int ageing_time = info->ageing_time;
/* NOTE(review): the error returns for the two range checks below are
 * elided in this extract — presumably -ERANGE; confirm against the full
 * file.
 */
37 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
40 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
43 /* Program the fastest ageing time in case of multiple bridges */
44 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
46 if (ds->ops->set_ageing_time)
47 return ds->ops->set_ageing_time(ds, ageing_time);
/* MTU notifications target the originating port plus all shared (CPU and
 * DSA link) ports, which must accommodate the largest user-port MTU.
 */
52 static bool dsa_port_mtu_match(struct dsa_port *dp,
53 struct dsa_notifier_mtu_info *info)
55 return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
/* Notifier handler: apply the new MTU on every port of @ds that matches
 * dsa_port_mtu_match(). No-op (success presumed) if the driver does not
 * implement port_change_mtu.
 */
58 static int dsa_switch_mtu(struct dsa_switch *ds,
59 struct dsa_notifier_mtu_info *info)
64 if (!ds->ops->port_change_mtu)
67 dsa_switch_for_each_port(dp, ds) {
68 if (dsa_port_mtu_match(dp, info)) {
69 ret = ds->ops->port_change_mtu(ds, dp->index,
/* Notifier handler for a port joining a bridge. Two cases:
 * - the joining port lives on this switch (info->dp->ds == ds): call the
 *   driver's port_bridge_join;
 * - the joining port is on another switch of the fabric: give this
 *   switch a chance to set up cross-chip forwarding via
 *   crosschip_bridge_join (optional op).
 */
79 static int dsa_switch_bridge_join(struct dsa_switch *ds,
80 struct dsa_notifier_bridge_info *info)
84 if (info->dp->ds == ds) {
/* NOTE(review): the error return when port_bridge_join is missing is
 * elided here — presumably -EOPNOTSUPP; confirm against the full file.
 */
85 if (!ds->ops->port_bridge_join)
88 err = ds->ops->port_bridge_join(ds, info->dp->index,
90 &info->tx_fwd_offload,
96 if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
97 err = ds->ops->crosschip_bridge_join(ds,
98 info->dp->ds->dst->index,
/* Notifier handler for a port leaving a bridge; mirrors
 * dsa_switch_bridge_join(). Both the local and the cross-chip ops are
 * optional and their return values are not propagated here.
 */
110 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
111 struct dsa_notifier_bridge_info *info)
113 if (info->dp->ds == ds && ds->ops->port_bridge_leave)
114 ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
116 if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
117 ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
125 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
126 * DSA links) that sit between the targeted port on which the notifier was
127 * emitted and its dedicated CPU port.
 */
129 static bool dsa_port_host_address_match(struct dsa_port *dp,
130 const struct dsa_port *targeted_dp)
132 struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
/* Only switches upstream of the targeted one can be on the path to the
 * host; on such a switch, the matching port is the one routing towards
 * the targeted port's CPU port.
 */
134 if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
135 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
/* Linear search of @addr_list for an entry matching MAC address, VLAN id
 * and database; returns the entry or (presumably, in the elided tail)
 * NULL when absent. Caller must hold the lock protecting @addr_list.
 */
141 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
142 const unsigned char *addr, u16 vid,
145 struct dsa_mac_addr *a;
147 list_for_each_entry(a, addr_list, list)
148 if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
149 dsa_db_equal(&a->db, &db))
/* Add an MDB entry on @dp. Shared (CPU/DSA) ports are reference-counted
 * under dp->addr_lists_lock, because multiple user ports may install the
 * same host address on them; user ports program the hardware directly.
 */
155 static int dsa_port_do_mdb_add(struct dsa_port *dp,
156 const struct switchdev_obj_port_mdb *mdb,
159 struct dsa_switch *ds = dp->ds;
160 struct dsa_mac_addr *a;
161 int port = dp->index;
164 /* No need to bother with refcounting for user ports */
165 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
166 return ds->ops->port_mdb_add(ds, port, mdb, db);
168 mutex_lock(&dp->addr_lists_lock);
170 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
/* Entry already installed: just bump the refcount (unlock path elided). */
172 refcount_inc(&a->refcount);
176 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* Program the hardware first; only on success (error path elided) is the
 * new entry recorded in dp->mdbs.
 */
182 err = ds->ops->port_mdb_add(ds, port, mdb, db);
188 ether_addr_copy(a->addr, mdb->addr);
191 refcount_set(&a->refcount, 1);
192 list_add_tail(&a->list, &dp->mdbs);
195 mutex_unlock(&dp->addr_lists_lock);
/* Delete an MDB entry from @dp; inverse of dsa_port_do_mdb_add(). The
 * hardware entry is only removed when the last reference drops; if the
 * driver's deletion fails, the refcount is restored to 1 so the entry
 * stays tracked.
 */
200 static int dsa_port_do_mdb_del(struct dsa_port *dp,
201 const struct switchdev_obj_port_mdb *mdb,
204 struct dsa_switch *ds = dp->ds;
205 struct dsa_mac_addr *a;
206 int port = dp->index;
209 /* No need to bother with refcounting for user ports */
210 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
211 return ds->ops->port_mdb_del(ds, port, mdb, db);
213 mutex_lock(&dp->addr_lists_lock);
/* NOTE(review): the not-found error path after the lookup is elided in
 * this extract.
 */
215 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
221 if (!refcount_dec_and_test(&a->refcount))
224 err = ds->ops->port_mdb_del(ds, port, mdb, db);
226 refcount_set(&a->refcount, 1);
234 mutex_unlock(&dp->addr_lists_lock);
/* Add an FDB entry on @dp. Same refcounting scheme as
 * dsa_port_do_mdb_add(): shared (CPU/DSA) ports track entries in
 * dp->fdbs under dp->addr_lists_lock; user ports go straight to the
 * driver.
 */
239 static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
240 u16 vid, struct dsa_db db)
242 struct dsa_switch *ds = dp->ds;
243 struct dsa_mac_addr *a;
244 int port = dp->index;
247 /* No need to bother with refcounting for user ports */
248 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
249 return ds->ops->port_fdb_add(ds, port, addr, vid, db);
251 mutex_lock(&dp->addr_lists_lock);
253 a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
/* Already present: bump the refcount (unlock path elided). */
255 refcount_inc(&a->refcount);
259 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* Hardware first, bookkeeping second (error path elided). */
265 err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
271 ether_addr_copy(a->addr, addr);
274 refcount_set(&a->refcount, 1);
275 list_add_tail(&a->list, &dp->fdbs);
278 mutex_unlock(&dp->addr_lists_lock);
/* Delete an FDB entry from @dp; inverse of dsa_port_do_fdb_add(). The
 * driver is only called when the last reference drops; on driver failure
 * the refcount is restored to 1.
 */
283 static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
284 u16 vid, struct dsa_db db)
286 struct dsa_switch *ds = dp->ds;
287 struct dsa_mac_addr *a;
288 int port = dp->index;
291 /* No need to bother with refcounting for user ports */
292 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
293 return ds->ops->port_fdb_del(ds, port, addr, vid, db);
295 mutex_lock(&dp->addr_lists_lock);
/* NOTE(review): not-found handling after the lookup is elided here. */
297 a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
303 if (!refcount_dec_and_test(&a->refcount))
306 err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
308 refcount_set(&a->refcount, 1);
316 mutex_unlock(&dp->addr_lists_lock);
/* Add an FDB entry on a LAG. Entries are refcounted per-LAG in
 * lag->fdbs under lag->fdb_lock, mirroring the per-port scheme of
 * dsa_port_do_fdb_add().
 */
321 static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
322 const unsigned char *addr, u16 vid,
325 struct dsa_mac_addr *a;
328 mutex_lock(&lag->fdb_lock);
330 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
/* Already present: bump the refcount (unlock path elided). */
332 refcount_inc(&a->refcount);
336 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* Program the driver first; record only on success (error path elided). */
342 err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
348 ether_addr_copy(a->addr, addr);
351 refcount_set(&a->refcount, 1);
352 list_add_tail(&a->list, &lag->fdbs);
355 mutex_unlock(&lag->fdb_lock);
/* Delete a LAG FDB entry; inverse of dsa_switch_do_lag_fdb_add(). Driver
 * is called on last reference only; refcount is restored on failure.
 */
360 static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
361 const unsigned char *addr, u16 vid,
364 struct dsa_mac_addr *a;
367 mutex_lock(&lag->fdb_lock);
/* NOTE(review): not-found handling after the lookup is elided here. */
369 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
375 if (!refcount_dec_and_test(&a->refcount))
378 err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
380 refcount_set(&a->refcount, 1);
388 mutex_unlock(&lag->fdb_lock);
/* Notifier handler: install a host FDB entry on every upstream-facing
 * port of @ds on the path to the targeted port's CPU port. When the CPU
 * port is part of a LAG, the entry is installed on the LAG instead of
 * the individual port.
 */
393 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
394 struct dsa_notifier_fdb_info *info)
399 if (!ds->ops->port_fdb_add)
402 dsa_switch_for_each_port(dp, ds) {
403 if (dsa_port_host_address_match(dp, info->dp)) {
404 if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
405 err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
410 err = dsa_port_do_fdb_add(dp, info->addr,
411 info->vid, info->db);
/* Notifier handler: remove a host FDB entry from every matching
 * upstream-facing port; mirrors dsa_switch_host_fdb_add().
 */
421 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
422 struct dsa_notifier_fdb_info *info)
427 if (!ds->ops->port_fdb_del)
430 dsa_switch_for_each_port(dp, ds) {
431 if (dsa_port_host_address_match(dp, info->dp)) {
432 if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
433 err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
438 err = dsa_port_do_fdb_del(dp, info->addr,
439 info->vid, info->db);
/* Notifier handler: add an FDB entry on the local port of @ds that
 * routes towards the targeted port (the port itself if local, otherwise
 * the DSA link heading that way).
 */
449 static int dsa_switch_fdb_add(struct dsa_switch *ds,
450 struct dsa_notifier_fdb_info *info)
452 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
453 struct dsa_port *dp = dsa_to_port(ds, port);
455 if (!ds->ops->port_fdb_add)
458 return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
/* Notifier handler: delete an FDB entry from the local port routing
 * towards the targeted port; mirrors dsa_switch_fdb_add().
 */
461 static int dsa_switch_fdb_del(struct dsa_switch *ds,
462 struct dsa_notifier_fdb_info *info)
464 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
465 struct dsa_port *dp = dsa_to_port(ds, port);
467 if (!ds->ops->port_fdb_del)
470 return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
/* Notifier handler: add a LAG FDB entry, but only on switches that
 * actually have a port offloading this LAG.
 */
473 static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
474 struct dsa_notifier_lag_fdb_info *info)
478 if (!ds->ops->lag_fdb_add)
481 /* Notify switch only if it has a port in this LAG */
482 dsa_switch_for_each_port(dp, ds)
483 if (dsa_port_offloads_lag(dp, info->lag))
484 return dsa_switch_do_lag_fdb_add(ds, info->lag,
485 info->addr, info->vid,
/* Notifier handler: delete a LAG FDB entry; mirrors
 * dsa_switch_lag_fdb_add().
 */
491 static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
492 struct dsa_notifier_lag_fdb_info *info)
496 if (!ds->ops->lag_fdb_del)
499 /* Notify switch only if it has a port in this LAG */
500 dsa_switch_for_each_port(dp, ds)
501 if (dsa_port_offloads_lag(dp, info->lag))
502 return dsa_switch_do_lag_fdb_del(ds, info->lag,
503 info->addr, info->vid,
/* Notifier handler for a LAG hash/state change: dispatch to the local op
 * when the port is on this switch, to the cross-chip op otherwise.
 */
509 static int dsa_switch_lag_change(struct dsa_switch *ds,
510 struct dsa_notifier_lag_info *info)
512 if (info->dp->ds == ds && ds->ops->port_lag_change)
513 return ds->ops->port_lag_change(ds, info->dp->index);
515 if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
516 return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
/* Notifier handler for a port joining a LAG: local vs cross-chip
 * dispatch, same pattern as dsa_switch_lag_change().
 */
522 static int dsa_switch_lag_join(struct dsa_switch *ds,
523 struct dsa_notifier_lag_info *info)
525 if (info->dp->ds == ds && ds->ops->port_lag_join)
526 return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
527 info->info, info->extack)
529 if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
530 return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
531 info->dp->index, info->lag,
532 info->info, info->extack);
/* Notifier handler for a port leaving a LAG: local vs cross-chip
 * dispatch, same pattern as dsa_switch_lag_join().
 */
537 static int dsa_switch_lag_leave(struct dsa_switch *ds,
538 struct dsa_notifier_lag_info *info)
540 if (info->dp->ds == ds && ds->ops->port_lag_leave)
541 return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
543 if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
544 return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
545 info->dp->index, info->lag);
/* Notifier handler: add an MDB entry on the local port routing towards
 * the targeted port; same routing logic as dsa_switch_fdb_add().
 */
550 static int dsa_switch_mdb_add(struct dsa_switch *ds,
551 struct dsa_notifier_mdb_info *info)
553 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
554 struct dsa_port *dp = dsa_to_port(ds, port);
556 if (!ds->ops->port_mdb_add)
559 return dsa_port_do_mdb_add(dp, info->mdb, info->db);
/* Notifier handler: delete an MDB entry from the local port routing
 * towards the targeted port; mirrors dsa_switch_mdb_add().
 */
562 static int dsa_switch_mdb_del(struct dsa_switch *ds,
563 struct dsa_notifier_mdb_info *info)
565 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
566 struct dsa_port *dp = dsa_to_port(ds, port);
568 if (!ds->ops->port_mdb_del)
571 return dsa_port_do_mdb_del(dp, info->mdb, info->db);
/* Notifier handler: install a host MDB entry on every upstream-facing
 * port on the path to the targeted port's CPU port.
 */
574 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
575 struct dsa_notifier_mdb_info *info)
580 if (!ds->ops->port_mdb_add)
583 dsa_switch_for_each_port(dp, ds) {
584 if (dsa_port_host_address_match(dp, info->dp)) {
585 err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
/* Notifier handler: remove a host MDB entry from every matching
 * upstream-facing port; mirrors dsa_switch_host_mdb_add().
 */
594 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
595 struct dsa_notifier_mdb_info *info)
600 if (!ds->ops->port_mdb_del)
603 dsa_switch_for_each_port(dp, ds) {
604 if (dsa_port_host_address_match(dp, info->dp)) {
605 err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
614 /* Port VLANs match on the targeted port and on all DSA ports */
615 static bool dsa_port_vlan_match(struct dsa_port *dp,
616 struct dsa_notifier_vlan_info *info)
618 return dsa_port_is_dsa(dp) || dp == info->dp;
621 /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
622 * (upstream and downstream) of that switch and its upstream switches.
 */
624 static bool dsa_port_host_vlan_match(struct dsa_port *dp,
625 const struct dsa_port *targeted_dp)
627 struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
629 if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
630 return dsa_port_is_dsa(dp) || dp == cpu_dp;
/* Linear search of @vlan_list for an entry with the same VID as @vlan;
 * returns the entry or (in the elided tail, presumably) NULL. Caller
 * must hold the lock protecting @vlan_list.
 */
635 static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
636 const struct switchdev_obj_port_vlan *vlan)
640 list_for_each_entry(v, vlan_list, list)
641 if (v->vid == vlan->vid)
/* Add a VLAN on @dp. Shared (CPU/DSA) ports refcount VLANs in dp->vlans
 * under dp->vlans_lock; user ports program the hardware directly.
 */
647 static int dsa_port_do_vlan_add(struct dsa_port *dp,
648 const struct switchdev_obj_port_vlan *vlan,
649 struct netlink_ext_ack *extack)
651 struct dsa_switch *ds = dp->ds;
652 int port = dp->index;
656 /* No need to bother with refcounting for user ports. */
657 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
658 return ds->ops->port_vlan_add(ds, port, vlan, extack);
660 /* No need to propagate on shared ports the existing VLANs that were
661 * re-notified after just the flags have changed. This would cause a
662 * refcount bump which we need to avoid, since it unbalances the
663 * additions with the deletions.
 */
668 mutex_lock(&dp->vlans_lock);
670 v = dsa_vlan_find(&dp->vlans, vlan);
/* Already present: bump the refcount (unlock path elided). */
672 refcount_inc(&v->refcount);
676 v = kzalloc(sizeof(*v), GFP_KERNEL);
/* Program the hardware first; record only on success (error path elided). */
682 err = ds->ops->port_vlan_add(ds, port, vlan, extack);
689 refcount_set(&v->refcount, 1);
690 list_add_tail(&v->list, &dp->vlans);
693 mutex_unlock(&dp->vlans_lock);
/* Delete a VLAN from @dp; inverse of dsa_port_do_vlan_add(). The driver
 * is called on last reference only; on driver failure the refcount is
 * restored to 1 so the VLAN stays tracked.
 */
698 static int dsa_port_do_vlan_del(struct dsa_port *dp,
699 const struct switchdev_obj_port_vlan *vlan)
701 struct dsa_switch *ds = dp->ds;
702 int port = dp->index;
706 /* No need to bother with refcounting for user ports */
707 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
708 return ds->ops->port_vlan_del(ds, port, vlan);
710 mutex_lock(&dp->vlans_lock);
/* NOTE(review): not-found handling after the lookup is elided here. */
712 v = dsa_vlan_find(&dp->vlans, vlan);
718 if (!refcount_dec_and_test(&v->refcount))
721 err = ds->ops->port_vlan_del(ds, port, vlan);
723 refcount_set(&v->refcount, 1);
731 mutex_unlock(&dp->vlans_lock);
/* Notifier handler: add a VLAN on every port of @ds matching
 * dsa_port_vlan_match() (the targeted port plus all DSA links).
 */
736 static int dsa_switch_vlan_add(struct dsa_switch *ds,
737 struct dsa_notifier_vlan_info *info)
742 if (!ds->ops->port_vlan_add)
745 dsa_switch_for_each_port(dp, ds) {
746 if (dsa_port_vlan_match(dp, info)) {
747 err = dsa_port_do_vlan_add(dp, info->vlan,
/* Notifier handler: delete a VLAN from every matching port; mirrors
 * dsa_switch_vlan_add().
 */
757 static int dsa_switch_vlan_del(struct dsa_switch *ds,
758 struct dsa_notifier_vlan_info *info)
763 if (!ds->ops->port_vlan_del)
766 dsa_switch_for_each_port(dp, ds) {
767 if (dsa_port_vlan_match(dp, info)) {
768 err = dsa_port_do_vlan_del(dp, info->vlan);
/* Notifier handler: add a host VLAN on every port matching
 * dsa_port_host_vlan_match() (CPU port and DSA links of upstream
 * switches).
 */
777 static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
778 struct dsa_notifier_vlan_info *info)
783 if (!ds->ops->port_vlan_add)
786 dsa_switch_for_each_port(dp, ds) {
787 if (dsa_port_host_vlan_match(dp, info->dp)) {
788 err = dsa_port_do_vlan_add(dp, info->vlan,
/* Notifier handler: delete a host VLAN from every matching port; mirrors
 * dsa_switch_host_vlan_add().
 */
798 static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
799 struct dsa_notifier_vlan_info *info)
804 if (!ds->ops->port_vlan_del)
807 dsa_switch_for_each_port(dp, ds) {
808 if (dsa_port_host_vlan_match(dp, info->dp)) {
809 err = dsa_port_do_vlan_del(dp, info->vlan);
/* Notifier handler: switch @ds to a new tagging protocol. The driver op
 * is called first (and may fail); only afterwards are the CPU ports'
 * tag_ops pointers and the user ports' tagger-derived state (slave
 * tagger setup, MTU) updated.
 */
818 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
819 struct dsa_notifier_tag_proto_info *info)
821 const struct dsa_device_ops *tag_ops = info->tag_ops;
822 struct dsa_port *dp, *cpu_dp;
825 if (!ds->ops->change_tag_protocol)
830 err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
834 dsa_switch_for_each_cpu_port(cpu_dp, ds)
835 dsa_port_set_tag_protocol(cpu_dp, tag_ops);
837 /* Now that changing the tag protocol can no longer fail, let's update
838 * the remaining bits which are "duplicated for faster access", and the
839 * bits that depend on the tagger, such as the MTU.
 */
841 dsa_switch_for_each_user_port(dp, ds) {
842 struct net_device *slave = dp->slave;
844 dsa_slave_setup_tagger(slave);
846 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
847 dsa_slave_change_mtu(slave, slave->mtu);
853 /* We use the same cross-chip notifiers to inform both the tagger side, as well
854 * as the switch side, of connection and disconnection events.
855 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
856 * switch side doesn't support connecting to this tagger, and therefore, the
857 * fact that we don't disconnect the tagger side doesn't constitute a memory
858 * leak: the tagger will still operate with persistent per-switch memory, just
859 * with the switch side unconnected to it. What does constitute a hard error is
860 * when the switch side supports connecting but fails.
 */
863 dsa_switch_connect_tag_proto(struct dsa_switch *ds,
864 struct dsa_notifier_tag_proto_info *info)
866 const struct dsa_device_ops *tag_ops = info->tag_ops;
869 /* Notify the new tagger about the connection to this switch */
870 if (tag_ops->connect) {
871 err = tag_ops->connect(ds);
/* Missing switch-side op is a soft success, per the comment above. */
876 if (!ds->ops->connect_tag_protocol)
879 /* Notify the switch about the connection to the new tagger */
880 err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
882 /* Revert the new tagger's connection to this tree */
883 if (tag_ops->disconnect)
884 tag_ops->disconnect(ds);
/* Notifier handler: sever the tagger's connection to @ds. Only the
 * tagger side is told (and only if it ever allocated ds->tagger_data).
 */
892 dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
893 struct dsa_notifier_tag_proto_info *info)
895 const struct dsa_device_ops *tag_ops = info->tag_ops;
897 /* Notify the tagger about the disconnection from this switch */
898 if (tag_ops->disconnect && ds->tagger_data)
899 tag_ops->disconnect(ds);
901 /* No need to notify the switch, since it shouldn't have any
902 * resources to tear down
 */
/* Notifier handler: relay the DSA master's operational-state change to
 * the driver, if it cares (op is optional).
 */
908 dsa_switch_master_state_change(struct dsa_switch *ds,
909 struct dsa_notifier_master_state_info *info)
911 if (!ds->ops->master_state_change)
914 ds->ops->master_state_change(ds, info->master, info->operational);
/* Notifier callback registered on the tree's raw notifier chain, one per
 * switch. Dispatches each DSA_NOTIFIER_* event to its handler above and
 * converts the handler's errno into a notifier return value; a non-zero
 * error breaks the chain (logged via dev_dbg below).
 */
919 static int dsa_switch_event(struct notifier_block *nb,
920 unsigned long event, void *info)
922 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
926 case DSA_NOTIFIER_AGEING_TIME:
927 err = dsa_switch_ageing_time(ds, info);
929 case DSA_NOTIFIER_BRIDGE_JOIN:
930 err = dsa_switch_bridge_join(ds, info);
932 case DSA_NOTIFIER_BRIDGE_LEAVE:
933 err = dsa_switch_bridge_leave(ds, info);
935 case DSA_NOTIFIER_FDB_ADD:
936 err = dsa_switch_fdb_add(ds, info);
938 case DSA_NOTIFIER_FDB_DEL:
939 err = dsa_switch_fdb_del(ds, info);
941 case DSA_NOTIFIER_HOST_FDB_ADD:
942 err = dsa_switch_host_fdb_add(ds, info);
944 case DSA_NOTIFIER_HOST_FDB_DEL:
945 err = dsa_switch_host_fdb_del(ds, info);
947 case DSA_NOTIFIER_LAG_FDB_ADD:
948 err = dsa_switch_lag_fdb_add(ds, info);
950 case DSA_NOTIFIER_LAG_FDB_DEL:
951 err = dsa_switch_lag_fdb_del(ds, info);
953 case DSA_NOTIFIER_LAG_CHANGE:
954 err = dsa_switch_lag_change(ds, info);
956 case DSA_NOTIFIER_LAG_JOIN:
957 err = dsa_switch_lag_join(ds, info);
959 case DSA_NOTIFIER_LAG_LEAVE:
960 err = dsa_switch_lag_leave(ds, info);
962 case DSA_NOTIFIER_MDB_ADD:
963 err = dsa_switch_mdb_add(ds, info);
965 case DSA_NOTIFIER_MDB_DEL:
966 err = dsa_switch_mdb_del(ds, info);
968 case DSA_NOTIFIER_HOST_MDB_ADD:
969 err = dsa_switch_host_mdb_add(ds, info);
971 case DSA_NOTIFIER_HOST_MDB_DEL:
972 err = dsa_switch_host_mdb_del(ds, info);
974 case DSA_NOTIFIER_VLAN_ADD:
975 err = dsa_switch_vlan_add(ds, info);
977 case DSA_NOTIFIER_VLAN_DEL:
978 err = dsa_switch_vlan_del(ds, info);
980 case DSA_NOTIFIER_HOST_VLAN_ADD:
981 err = dsa_switch_host_vlan_add(ds, info);
983 case DSA_NOTIFIER_HOST_VLAN_DEL:
984 err = dsa_switch_host_vlan_del(ds, info);
986 case DSA_NOTIFIER_MTU:
987 err = dsa_switch_mtu(ds, info);
989 case DSA_NOTIFIER_TAG_PROTO:
990 err = dsa_switch_change_tag_proto(ds, info);
992 case DSA_NOTIFIER_TAG_PROTO_CONNECT:
993 err = dsa_switch_connect_tag_proto(ds, info);
995 case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
996 err = dsa_switch_disconnect_tag_proto(ds, info);
998 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
999 err = dsa_switch_tag_8021q_vlan_add(ds, info);
1001 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
1002 err = dsa_switch_tag_8021q_vlan_del(ds, info);
1004 case DSA_NOTIFIER_MASTER_STATE_CHANGE:
1005 err = dsa_switch_master_state_change(ds, info);
/* NOTE(review): the default case (unknown event) is elided in this
 * extract — presumably err = -EOPNOTSUPP; confirm against the full file.
 */
1013 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
1016 return notifier_from_errno(err);
/**
1020 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
1021 * @dst: collection of struct dsa_switch devices to notify.
1022 * @e: event, must be of type DSA_NOTIFIER_*
1023 * @v: event-specific value.
 *
1025 * Given a struct dsa_switch_tree, this can be used to run a function once for
1026 * each member DSA switch. The other alternative of traversing the tree is only
1027 * through its ports list, which does not uniquely list the switches.
 */
1029 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
1031 struct raw_notifier_head *nh = &dst->nh;
1034 err = raw_notifier_call_chain(nh, e, v);
/* Translate NOTIFY_* back into an errno for the caller. */
1036 return notifier_to_errno(err);
/**
1040 * dsa_broadcast - Notify all DSA trees in the system.
1041 * @e: event, must be of type DSA_NOTIFIER_*
1042 * @v: event-specific value.
 *
1044 * Can be used to notify the switching fabric of events such as cross-chip
1045 * bridging between disjoint trees (such as islands of tagger-compatible
1046 * switches bridged by an incompatible middle switch).
 *
1048 * WARNING: this function is not reliable during probe time, because probing
1049 * between trees is asynchronous and not all DSA trees might have probed.
 */
1051 int dsa_broadcast(unsigned long e, void *v)
1053 struct dsa_switch_tree *dst;
/* Forward the event to every registered tree; the loop's early-exit on
 * error is elided in this extract.
 */
1056 list_for_each_entry(dst, &dsa_tree_list, list) {
1057 err = dsa_tree_notify(dst, e, v);
/* Hook @ds into its tree's notifier chain so dsa_switch_event() receives
 * cross-chip notifications for this switch.
 */
1065 int dsa_switch_register_notifier(struct dsa_switch *ds)
1067 ds->nb.notifier_call = dsa_switch_event;
1069 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
/* Remove @ds from its tree's notifier chain; failure is only logged
 * since the caller (teardown path) cannot recover.
 */
1072 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
1076 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
1078 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);