1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Handling of a single switch chip, part of a switch fabric
5 * Copyright (c) 2017 Savoir-faire Linux Inc.
6 * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
/*
 * Return the fastest requested ageing time across all ports of @ds: the
 * smallest non-zero per-port ageing_time, or @ageing_time if no port
 * requests a faster one.
 */
21 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
22 unsigned int ageing_time)
26 dsa_switch_for_each_port(dp, ds)
27 if (dp->ageing_time && dp->ageing_time < ageing_time)
28 ageing_time = dp->ageing_time;
/*
 * Handle a DSA_NOTIFIER_AGEING_TIME event: range-check the requested FDB
 * ageing time against the switch's advertised min/max, reduce it to the
 * fastest value requested by any port, then program it through the
 * driver's set_ageing_time op (if implemented).
 */
33 static int dsa_switch_ageing_time(struct dsa_switch *ds,
34 struct dsa_notifier_ageing_time_info *info)
36 unsigned int ageing_time = info->ageing_time;
38 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
41 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
44 /* Program the fastest ageing time in case of multiple bridges */
45 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
47 if (ds->ops->set_ageing_time)
48 return ds->ops->set_ageing_time(ds, ageing_time);
/*
 * An MTU change targets the notified port itself plus all shared
 * (DSA-link and CPU) ports, which must carry the largest frames.
 */
53 static bool dsa_port_mtu_match(struct dsa_port *dp,
54 struct dsa_notifier_mtu_info *info)
56 return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
/*
 * Handle a DSA_NOTIFIER_MTU event: apply the new MTU via the driver's
 * port_change_mtu op on every port matched by dsa_port_mtu_match().
 */
59 static int dsa_switch_mtu(struct dsa_switch *ds,
60 struct dsa_notifier_mtu_info *info)
65 if (!ds->ops->port_change_mtu)
68 dsa_switch_for_each_port(dp, ds) {
69 if (dsa_port_mtu_match(dp, info)) {
70 ret = ds->ops->port_change_mtu(ds, dp->index,
/*
 * Handle a DSA_NOTIFIER_BRIDGE_JOIN event. If the joining port belongs
 * to this switch, call the driver's port_bridge_join op; otherwise let
 * the driver react cross-chip through crosschip_bridge_join (if it
 * implements it).
 */
80 static int dsa_switch_bridge_join(struct dsa_switch *ds,
81 struct dsa_notifier_bridge_info *info)
85 if (info->dp->ds == ds) {
86 if (!ds->ops->port_bridge_join)
89 err = ds->ops->port_bridge_join(ds, info->dp->index,
91 &info->tx_fwd_offload,
97 if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
98 err = ds->ops->crosschip_bridge_join(ds,
99 info->dp->ds->dst->index,
/*
 * Handle a DSA_NOTIFIER_BRIDGE_LEAVE event: mirror of bridge_join,
 * dispatching to port_bridge_leave on the local switch or to
 * crosschip_bridge_leave on other switches in the fabric.
 */
111 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
112 struct dsa_notifier_bridge_info *info)
114 if (info->dp->ds == ds && ds->ops->port_bridge_leave)
115 ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
117 if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
118 ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
126 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
127 * DSA links) that sit between the targeted port on which the notifier was
128 * emitted and its dedicated CPU port.
130 static bool dsa_port_host_address_match(struct dsa_port *dp,
131 const struct dsa_port *targeted_dp)
133 struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
135 if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
/* match the port that routes towards the targeted port's CPU port */
136 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
/*
 * Linear lookup of a refcounted MAC address entry on @addr_list, keyed
 * by MAC address, VID and database. Caller is expected to hold the
 * lock serializing the address list.
 */
142 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
143 const unsigned char *addr, u16 vid,
146 struct dsa_mac_addr *a;
148 list_for_each_entry(a, addr_list, list)
149 if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
150 dsa_db_equal(&a->db, &db))
/*
 * Install an MDB entry on @dp. User ports call straight into the driver;
 * shared (CPU/DSA) ports keep a refcounted copy on dp->mdbs, serialized
 * by dp->addr_lists_lock, so the same group notified from multiple user
 * ports is only programmed into hardware once.
 */
156 static int dsa_port_do_mdb_add(struct dsa_port *dp,
157 const struct switchdev_obj_port_mdb *mdb,
160 struct dsa_switch *ds = dp->ds;
161 struct dsa_mac_addr *a;
162 int port = dp->index;
165 /* No need to bother with refcounting for user ports */
166 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
167 return ds->ops->port_mdb_add(ds, port, mdb, db);
169 mutex_lock(&dp->addr_lists_lock);
171 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
/* already present: bump the refcount instead of re-programming */
173 refcount_inc(&a->refcount);
177 a = kzalloc(sizeof(*a), GFP_KERNEL);
183 err = ds->ops->port_mdb_add(ds, port, mdb, db);
189 ether_addr_copy(a->addr, mdb->addr);
192 refcount_set(&a->refcount, 1);
193 list_add_tail(&a->list, &dp->mdbs);
196 mutex_unlock(&dp->addr_lists_lock);
/*
 * Remove an MDB entry from @dp. Counterpart of dsa_port_do_mdb_add():
 * user ports call the driver directly; shared ports only delete from
 * hardware when the refcount drops to zero.
 */
201 static int dsa_port_do_mdb_del(struct dsa_port *dp,
202 const struct switchdev_obj_port_mdb *mdb,
205 struct dsa_switch *ds = dp->ds;
206 struct dsa_mac_addr *a;
207 int port = dp->index;
210 /* No need to bother with refcounting for user ports */
211 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
212 return ds->ops->port_mdb_del(ds, port, mdb, db);
214 mutex_lock(&dp->addr_lists_lock);
216 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
222 if (!refcount_dec_and_test(&a->refcount))
225 err = ds->ops->port_mdb_del(ds, port, mdb, db);
/* hardware deletion failed: restore the refcount for a later retry */
227 refcount_set(&a->refcount, 1);
235 mutex_unlock(&dp->addr_lists_lock);
/*
 * Install an FDB entry on @dp. Same refcounting scheme as the MDB path:
 * driver call only, for user ports; refcounted dp->fdbs entry under
 * dp->addr_lists_lock for shared (CPU/DSA) ports.
 */
240 static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
241 u16 vid, struct dsa_db db)
243 struct dsa_switch *ds = dp->ds;
244 struct dsa_mac_addr *a;
245 int port = dp->index;
248 /* No need to bother with refcounting for user ports */
249 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
250 return ds->ops->port_fdb_add(ds, port, addr, vid, db);
252 mutex_lock(&dp->addr_lists_lock);
254 a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
/* already present: bump the refcount instead of re-programming */
256 refcount_inc(&a->refcount);
260 a = kzalloc(sizeof(*a), GFP_KERNEL);
266 err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
272 ether_addr_copy(a->addr, addr);
275 refcount_set(&a->refcount, 1);
276 list_add_tail(&a->list, &dp->fdbs);
279 mutex_unlock(&dp->addr_lists_lock);
/*
 * Remove an FDB entry from @dp. Counterpart of dsa_port_do_fdb_add():
 * only deletes from hardware on a shared port when the last reference
 * is dropped.
 */
284 static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
285 u16 vid, struct dsa_db db)
287 struct dsa_switch *ds = dp->ds;
288 struct dsa_mac_addr *a;
289 int port = dp->index;
292 /* No need to bother with refcounting for user ports */
293 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
294 return ds->ops->port_fdb_del(ds, port, addr, vid, db);
296 mutex_lock(&dp->addr_lists_lock);
298 a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
304 if (!refcount_dec_and_test(&a->refcount))
307 err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
/* hardware deletion failed: restore the refcount for a later retry */
309 refcount_set(&a->refcount, 1);
317 mutex_unlock(&dp->addr_lists_lock);
/*
 * Install an FDB entry on a LAG. Entries are refcounted on lag->fdbs
 * under lag->fdb_lock; the driver's lag_fdb_add op is only called for
 * the first reference.
 */
322 static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
323 const unsigned char *addr, u16 vid,
326 struct dsa_mac_addr *a;
329 mutex_lock(&lag->fdb_lock);
331 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
/* already present: bump the refcount instead of re-programming */
333 refcount_inc(&a->refcount);
337 a = kzalloc(sizeof(*a), GFP_KERNEL);
343 err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
349 ether_addr_copy(a->addr, addr);
352 refcount_set(&a->refcount, 1);
353 list_add_tail(&a->list, &lag->fdbs);
356 mutex_unlock(&lag->fdb_lock);
/*
 * Remove an FDB entry from a LAG. Counterpart of
 * dsa_switch_do_lag_fdb_add(): the driver is only notified when the
 * last reference is dropped.
 */
361 static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
362 const unsigned char *addr, u16 vid,
365 struct dsa_mac_addr *a;
368 mutex_lock(&lag->fdb_lock);
370 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
376 if (!refcount_dec_and_test(&a->refcount))
379 err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
/* hardware deletion failed: restore the refcount for a later retry */
381 refcount_set(&a->refcount, 1);
389 mutex_unlock(&lag->fdb_lock);
/*
 * Handle DSA_NOTIFIER_HOST_FDB_ADD: install a host (trapped-to-CPU) FDB
 * entry on every upstream-facing port between the targeted port and its
 * CPU port. When the CPU port is part of a LAG, install on the LAG
 * instead of the individual port.
 */
394 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
395 struct dsa_notifier_fdb_info *info)
400 if (!ds->ops->port_fdb_add)
403 dsa_switch_for_each_port(dp, ds) {
404 if (dsa_port_host_address_match(dp, info->dp)) {
405 if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
406 err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
411 err = dsa_port_do_fdb_add(dp, info->addr,
412 info->vid, info->db);
/*
 * Handle DSA_NOTIFIER_HOST_FDB_DEL: mirror of dsa_switch_host_fdb_add(),
 * removing the host FDB entry from the matching upstream-facing ports
 * (or from the CPU port's LAG).
 */
422 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
423 struct dsa_notifier_fdb_info *info)
428 if (!ds->ops->port_fdb_del)
431 dsa_switch_for_each_port(dp, ds) {
432 if (dsa_port_host_address_match(dp, info->dp)) {
433 if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
434 err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
439 err = dsa_port_do_fdb_del(dp, info->addr,
440 info->vid, info->db);
/*
 * Handle DSA_NOTIFIER_FDB_ADD on this switch: resolve the local port
 * that routes towards the targeted port (the port itself, or the DSA
 * link leading to it on another switch) and install the entry there.
 */
450 static int dsa_switch_fdb_add(struct dsa_switch *ds,
451 struct dsa_notifier_fdb_info *info)
453 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
454 struct dsa_port *dp = dsa_to_port(ds, port);
456 if (!ds->ops->port_fdb_add)
459 return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
/*
 * Handle DSA_NOTIFIER_FDB_DEL: mirror of dsa_switch_fdb_add(), removing
 * the entry from the local port that routes towards the targeted port.
 */
462 static int dsa_switch_fdb_del(struct dsa_switch *ds,
463 struct dsa_notifier_fdb_info *info)
465 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
466 struct dsa_port *dp = dsa_to_port(ds, port);
468 if (!ds->ops->port_fdb_del)
471 return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
/*
 * Handle DSA_NOTIFIER_LAG_FDB_ADD: install the entry only if this
 * switch actually has a port offloading the LAG in question.
 */
474 static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
475 struct dsa_notifier_lag_fdb_info *info)
479 if (!ds->ops->lag_fdb_add)
482 /* Notify switch only if it has a port in this LAG */
483 dsa_switch_for_each_port(dp, ds)
484 if (dsa_port_offloads_lag(dp, info->lag))
485 return dsa_switch_do_lag_fdb_add(ds, info->lag,
486 info->addr, info->vid,
/*
 * Handle DSA_NOTIFIER_LAG_FDB_DEL: mirror of dsa_switch_lag_fdb_add(),
 * only acting if this switch has a port offloading the LAG.
 */
492 static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
493 struct dsa_notifier_lag_fdb_info *info)
497 if (!ds->ops->lag_fdb_del)
500 /* Notify switch only if it has a port in this LAG */
501 dsa_switch_for_each_port(dp, ds)
502 if (dsa_port_offloads_lag(dp, info->lag))
503 return dsa_switch_do_lag_fdb_del(ds, info->lag,
504 info->addr, info->vid,
/*
 * Handle DSA_NOTIFIER_LAG_CHANGE: dispatch to the local port_lag_change
 * op when the port is on this switch, or to crosschip_lag_change when
 * it lives on another switch in the fabric.
 */
510 static int dsa_switch_lag_change(struct dsa_switch *ds,
511 struct dsa_notifier_lag_info *info)
513 if (info->dp->ds == ds && ds->ops->port_lag_change)
514 return ds->ops->port_lag_change(ds, info->dp->index);
516 if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
517 return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
/*
 * Handle DSA_NOTIFIER_LAG_JOIN: local ports go through port_lag_join,
 * ports on other switches through crosschip_lag_join.
 */
523 static int dsa_switch_lag_join(struct dsa_switch *ds,
524 struct dsa_notifier_lag_info *info)
526 if (info->dp->ds == ds && ds->ops->port_lag_join)
527 return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
528 info->info, info->extack);
530 if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
531 return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
532 info->dp->index, info->lag,
533 info->info, info->extack);
/*
 * Handle DSA_NOTIFIER_LAG_LEAVE: mirror of dsa_switch_lag_join().
 */
538 static int dsa_switch_lag_leave(struct dsa_switch *ds,
539 struct dsa_notifier_lag_info *info)
541 if (info->dp->ds == ds && ds->ops->port_lag_leave)
542 return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
544 if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
545 return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
546 info->dp->index, info->lag);
/*
 * Handle DSA_NOTIFIER_MDB_ADD: resolve the local port routing towards
 * the targeted port and install the MDB entry there.
 */
551 static int dsa_switch_mdb_add(struct dsa_switch *ds,
552 struct dsa_notifier_mdb_info *info)
554 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
555 struct dsa_port *dp = dsa_to_port(ds, port);
557 if (!ds->ops->port_mdb_add)
560 return dsa_port_do_mdb_add(dp, info->mdb, info->db);
/*
 * Handle DSA_NOTIFIER_MDB_DEL: mirror of dsa_switch_mdb_add().
 */
563 static int dsa_switch_mdb_del(struct dsa_switch *ds,
564 struct dsa_notifier_mdb_info *info)
566 int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
567 struct dsa_port *dp = dsa_to_port(ds, port);
569 if (!ds->ops->port_mdb_del)
572 return dsa_port_do_mdb_del(dp, info->mdb, info->db);
/*
 * Handle DSA_NOTIFIER_HOST_MDB_ADD: install the MDB entry on every
 * upstream-facing port between the targeted port and its CPU port.
 */
575 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
576 struct dsa_notifier_mdb_info *info)
581 if (!ds->ops->port_mdb_add)
584 dsa_switch_for_each_port(dp, ds) {
585 if (dsa_port_host_address_match(dp, info->dp)) {
586 err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
/*
 * Handle DSA_NOTIFIER_HOST_MDB_DEL: mirror of dsa_switch_host_mdb_add().
 */
595 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
596 struct dsa_notifier_mdb_info *info)
601 if (!ds->ops->port_mdb_del)
604 dsa_switch_for_each_port(dp, ds) {
605 if (dsa_port_host_address_match(dp, info->dp)) {
606 err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
615 /* Port VLANs match on the targeted port and on all DSA ports */
616 static bool dsa_port_vlan_match(struct dsa_port *dp,
617 struct dsa_notifier_vlan_info *info)
/* the targeted port itself, or any DSA link (which carries all VLANs) */
619 return dsa_port_is_dsa(dp) || dp == info->dp;
622 /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
623 * (upstream and downstream) of that switch and its upstream switches.
625 static bool dsa_port_host_vlan_match(struct dsa_port *dp,
626 const struct dsa_port *targeted_dp)
628 struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
630 if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
/* any DSA link on an upstream switch, or the targeted CPU port */
631 return dsa_port_is_dsa(dp) || dp == cpu_dp;
/*
 * Linear lookup of a refcounted VLAN entry on @vlan_list by VID only
 * (flags are not part of the key). Caller must hold the list's lock.
 */
636 static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
637 const struct switchdev_obj_port_vlan *vlan)
641 list_for_each_entry(v, vlan_list, list)
642 if (v->vid == vlan->vid)
/*
 * Install a VLAN on @dp. User ports call the driver directly; shared
 * (CPU/DSA) ports refcount entries on dp->vlans under dp->vlans_lock so
 * the same VID notified from multiple user ports is programmed once.
 */
648 static int dsa_port_do_vlan_add(struct dsa_port *dp,
649 const struct switchdev_obj_port_vlan *vlan,
650 struct netlink_ext_ack *extack)
652 struct dsa_switch *ds = dp->ds;
653 int port = dp->index;
657 /* No need to bother with refcounting for user ports. */
658 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
659 return ds->ops->port_vlan_add(ds, port, vlan, extack);
661 /* No need to propagate on shared ports the existing VLANs that were
662 * re-notified after just the flags have changed. This would cause a
663 * refcount bump which we need to avoid, since it unbalances the
664 * additions with the deletions.
669 mutex_lock(&dp->vlans_lock);
671 v = dsa_vlan_find(&dp->vlans, vlan);
/* already present: bump the refcount instead of re-programming */
673 refcount_inc(&v->refcount);
677 v = kzalloc(sizeof(*v), GFP_KERNEL);
683 err = ds->ops->port_vlan_add(ds, port, vlan, extack);
690 refcount_set(&v->refcount, 1);
691 list_add_tail(&v->list, &dp->vlans);
694 mutex_unlock(&dp->vlans_lock);
/*
 * Remove a VLAN from @dp. Counterpart of dsa_port_do_vlan_add(): shared
 * ports only delete from hardware when the refcount reaches zero.
 */
699 static int dsa_port_do_vlan_del(struct dsa_port *dp,
700 const struct switchdev_obj_port_vlan *vlan)
702 struct dsa_switch *ds = dp->ds;
703 int port = dp->index;
707 /* No need to bother with refcounting for user ports */
708 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
709 return ds->ops->port_vlan_del(ds, port, vlan);
711 mutex_lock(&dp->vlans_lock);
713 v = dsa_vlan_find(&dp->vlans, vlan);
719 if (!refcount_dec_and_test(&v->refcount))
722 err = ds->ops->port_vlan_del(ds, port, vlan);
/* hardware deletion failed: restore the refcount for a later retry */
724 refcount_set(&v->refcount, 1);
732 mutex_unlock(&dp->vlans_lock);
/*
 * Handle DSA_NOTIFIER_VLAN_ADD: add the VLAN on every port matched by
 * dsa_port_vlan_match() (targeted port plus DSA links).
 */
737 static int dsa_switch_vlan_add(struct dsa_switch *ds,
738 struct dsa_notifier_vlan_info *info)
743 if (!ds->ops->port_vlan_add)
746 dsa_switch_for_each_port(dp, ds) {
747 if (dsa_port_vlan_match(dp, info)) {
748 err = dsa_port_do_vlan_add(dp, info->vlan,
/*
 * Handle DSA_NOTIFIER_VLAN_DEL: mirror of dsa_switch_vlan_add().
 */
758 static int dsa_switch_vlan_del(struct dsa_switch *ds,
759 struct dsa_notifier_vlan_info *info)
764 if (!ds->ops->port_vlan_del)
767 dsa_switch_for_each_port(dp, ds) {
768 if (dsa_port_vlan_match(dp, info)) {
769 err = dsa_port_do_vlan_del(dp, info->vlan);
/*
 * Handle DSA_NOTIFIER_HOST_VLAN_ADD: add the VLAN on the host-facing
 * ports matched by dsa_port_host_vlan_match().
 */
778 static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
779 struct dsa_notifier_vlan_info *info)
784 if (!ds->ops->port_vlan_add)
787 dsa_switch_for_each_port(dp, ds) {
788 if (dsa_port_host_vlan_match(dp, info->dp)) {
789 err = dsa_port_do_vlan_add(dp, info->vlan,
/*
 * Handle DSA_NOTIFIER_HOST_VLAN_DEL: mirror of dsa_switch_host_vlan_add().
 */
799 static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
800 struct dsa_notifier_vlan_info *info)
805 if (!ds->ops->port_vlan_del)
808 dsa_switch_for_each_port(dp, ds) {
809 if (dsa_port_host_vlan_match(dp, info->dp)) {
810 err = dsa_port_do_vlan_del(dp, info->vlan);
/*
 * Handle DSA_NOTIFIER_TAG_PROTO: switch this chip over to a new tagging
 * protocol. The driver op is called first (and may fail); only then are
 * the CPU ports' tag_ops pointers and the user ports' tagger-dependent
 * state (slave tagger setup, MTU) updated, so failure leaves the old
 * protocol intact.
 */
819 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
820 struct dsa_notifier_tag_proto_info *info)
822 const struct dsa_device_ops *tag_ops = info->tag_ops;
823 struct dsa_port *dp, *cpu_dp;
826 if (!ds->ops->change_tag_protocol)
831 err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
835 dsa_switch_for_each_cpu_port(cpu_dp, ds)
836 dsa_port_set_tag_protocol(cpu_dp, tag_ops);
838 /* Now that changing the tag protocol can no longer fail, let's update
839 * the remaining bits which are "duplicated for faster access", and the
840 * bits that depend on the tagger, such as the MTU.
842 dsa_switch_for_each_user_port(dp, ds) {
843 struct net_device *slave = dp->slave;
845 dsa_slave_setup_tagger(slave);
847 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
848 dsa_slave_change_mtu(slave, slave->mtu);
854 /* We use the same cross-chip notifiers to inform both the tagger side, as well
855 * as the switch side, of connection and disconnection events.
856 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
857 * switch side doesn't support connecting to this tagger, and therefore, the
858 * fact that we don't disconnect the tagger side doesn't constitute a memory
859 * leak: the tagger will still operate with persistent per-switch memory, just
860 * with the switch side unconnected to it. What does constitute a hard error is
861 * when the switch side supports connecting but fails.
/*
 * Handle DSA_NOTIFIER_TAG_PROTO_CONNECT: connect the tagger to this
 * switch first, then the switch to the tagger; if the switch-side
 * connection fails, the tagger-side connection is rolled back. See the
 * comment above for why a missing switch-side op is not an error.
 */
864 dsa_switch_connect_tag_proto(struct dsa_switch *ds,
865 struct dsa_notifier_tag_proto_info *info)
867 const struct dsa_device_ops *tag_ops = info->tag_ops;
870 /* Notify the new tagger about the connection to this switch */
871 if (tag_ops->connect) {
872 err = tag_ops->connect(ds);
877 if (!ds->ops->connect_tag_protocol)
880 /* Notify the switch about the connection to the new tagger */
881 err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
883 /* Revert the new tagger's connection to this tree */
884 if (tag_ops->disconnect)
885 tag_ops->disconnect(ds);
/*
 * Handle DSA_NOTIFIER_TAG_PROTO_DISCONNECT: tell the tagger to release
 * its per-switch state, but only if it ever allocated any
 * (ds->tagger_data non-NULL).
 */
893 dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
894 struct dsa_notifier_tag_proto_info *info)
896 const struct dsa_device_ops *tag_ops = info->tag_ops;
898 /* Notify the tagger about the disconnection from this switch */
899 if (tag_ops->disconnect && ds->tagger_data)
900 tag_ops->disconnect(ds);
902 /* No need to notify the switch, since it shouldn't have any
903 * resources to tear down
/*
 * Handle DSA_NOTIFIER_MASTER_STATE_CHANGE: forward the DSA master's
 * operational state to the driver, if it cares.
 */
909 dsa_switch_master_state_change(struct dsa_switch *ds,
910 struct dsa_notifier_master_state_info *info)
912 if (!ds->ops->master_state_change)
915 ds->ops->master_state_change(ds, info->master, info->operational);
/*
 * Notifier callback registered per switch on the tree's raw notifier
 * chain. Demultiplexes DSA_NOTIFIER_* events to the handlers above;
 * errors break the chain and are logged, then converted for the
 * notifier core via notifier_from_errno().
 */
920 static int dsa_switch_event(struct notifier_block *nb,
921 unsigned long event, void *info)
923 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
927 case DSA_NOTIFIER_AGEING_TIME:
928 err = dsa_switch_ageing_time(ds, info);
930 case DSA_NOTIFIER_BRIDGE_JOIN:
931 err = dsa_switch_bridge_join(ds, info);
933 case DSA_NOTIFIER_BRIDGE_LEAVE:
934 err = dsa_switch_bridge_leave(ds, info);
936 case DSA_NOTIFIER_FDB_ADD:
937 err = dsa_switch_fdb_add(ds, info);
939 case DSA_NOTIFIER_FDB_DEL:
940 err = dsa_switch_fdb_del(ds, info);
942 case DSA_NOTIFIER_HOST_FDB_ADD:
943 err = dsa_switch_host_fdb_add(ds, info);
945 case DSA_NOTIFIER_HOST_FDB_DEL:
946 err = dsa_switch_host_fdb_del(ds, info);
948 case DSA_NOTIFIER_LAG_FDB_ADD:
949 err = dsa_switch_lag_fdb_add(ds, info);
951 case DSA_NOTIFIER_LAG_FDB_DEL:
952 err = dsa_switch_lag_fdb_del(ds, info);
954 case DSA_NOTIFIER_LAG_CHANGE:
955 err = dsa_switch_lag_change(ds, info);
957 case DSA_NOTIFIER_LAG_JOIN:
958 err = dsa_switch_lag_join(ds, info);
960 case DSA_NOTIFIER_LAG_LEAVE:
961 err = dsa_switch_lag_leave(ds, info);
963 case DSA_NOTIFIER_MDB_ADD:
964 err = dsa_switch_mdb_add(ds, info);
966 case DSA_NOTIFIER_MDB_DEL:
967 err = dsa_switch_mdb_del(ds, info);
969 case DSA_NOTIFIER_HOST_MDB_ADD:
970 err = dsa_switch_host_mdb_add(ds, info);
972 case DSA_NOTIFIER_HOST_MDB_DEL:
973 err = dsa_switch_host_mdb_del(ds, info);
975 case DSA_NOTIFIER_VLAN_ADD:
976 err = dsa_switch_vlan_add(ds, info);
978 case DSA_NOTIFIER_VLAN_DEL:
979 err = dsa_switch_vlan_del(ds, info);
981 case DSA_NOTIFIER_HOST_VLAN_ADD:
982 err = dsa_switch_host_vlan_add(ds, info);
984 case DSA_NOTIFIER_HOST_VLAN_DEL:
985 err = dsa_switch_host_vlan_del(ds, info);
987 case DSA_NOTIFIER_MTU:
988 err = dsa_switch_mtu(ds, info);
990 case DSA_NOTIFIER_TAG_PROTO:
991 err = dsa_switch_change_tag_proto(ds, info);
993 case DSA_NOTIFIER_TAG_PROTO_CONNECT:
994 err = dsa_switch_connect_tag_proto(ds, info);
996 case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
997 err = dsa_switch_disconnect_tag_proto(ds, info);
999 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
1000 err = dsa_switch_tag_8021q_vlan_add(ds, info);
1002 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
1003 err = dsa_switch_tag_8021q_vlan_del(ds, info);
1005 case DSA_NOTIFIER_MASTER_STATE_CHANGE:
1006 err = dsa_switch_master_state_change(ds, info);
1014 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
1017 return notifier_from_errno(err);
1021 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
1022 * @dst: collection of struct dsa_switch devices to notify.
1023 * @e: event, must be of type DSA_NOTIFIER_*
1024 * @v: event-specific value.
1026 * Given a struct dsa_switch_tree, this can be used to run a function once for
1027 * each member DSA switch. The other alternative of traversing the tree is only
1028 * through its ports list, which does not uniquely list the switches.
/* See the kernel-doc above: runs the event once per switch in the tree. */
1030 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
1032 struct raw_notifier_head *nh = &dst->nh;
1035 err = raw_notifier_call_chain(nh, e, v);
1037 return notifier_to_errno(err);
1041 * dsa_broadcast - Notify all DSA trees in the system.
1042 * @e: event, must be of type DSA_NOTIFIER_*
1043 * @v: event-specific value.
1045 * Can be used to notify the switching fabric of events such as cross-chip
1046 * bridging between disjoint trees (such as islands of tagger-compatible
1047 * switches bridged by an incompatible middle switch).
1049 * WARNING: this function is not reliable during probe time, because probing
1050 * between trees is asynchronous and not all DSA trees might have probed.
/* See the kernel-doc above: notifies every tree in dsa_tree_list. */
1052 int dsa_broadcast(unsigned long e, void *v)
1054 struct dsa_switch_tree *dst;
1057 list_for_each_entry(dst, &dsa_tree_list, list) {
1058 err = dsa_tree_notify(dst, e, v);
/*
 * Hook this switch into its tree's notifier chain so it receives
 * cross-chip DSA_NOTIFIER_* events via dsa_switch_event().
 */
1066 int dsa_switch_register_notifier(struct dsa_switch *ds)
1068 ds->nb.notifier_call = dsa_switch_event;
1070 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
/*
 * Remove this switch from its tree's notifier chain; failure is only
 * logged since there is no caller-visible way to recover here.
 */
1073 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
1077 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
1079 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);