// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
22 for (i = 0; i < ds->num_ports; ++i) {
23 struct dsa_port *dp = dsa_to_port(ds, i);
25 if (dp->ageing_time && dp->ageing_time < ageing_time)
26 ageing_time = dp->ageing_time;
32 static int dsa_switch_ageing_time(struct dsa_switch *ds,
33 struct dsa_notifier_ageing_time_info *info)
35 unsigned int ageing_time = info->ageing_time;
37 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
40 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
43 /* Program the fastest ageing time in case of multiple bridges */
44 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
46 if (ds->ops->set_ageing_time)
47 return ds->ops->set_ageing_time(ds, ageing_time);
52 static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
53 struct dsa_notifier_mtu_info *info)
55 if (ds->index == info->sw_index && port == info->port)
58 /* Do not propagate to other switches in the tree if the notifier was
59 * targeted for a single switch.
61 if (info->targeted_match)
64 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
70 static int dsa_switch_mtu(struct dsa_switch *ds,
71 struct dsa_notifier_mtu_info *info)
75 if (!ds->ops->port_change_mtu)
78 for (port = 0; port < ds->num_ports; port++) {
79 if (dsa_switch_mtu_match(ds, port, info)) {
80 ret = ds->ops->port_change_mtu(ds, port, info->mtu);
89 static int dsa_switch_bridge_join(struct dsa_switch *ds,
90 struct dsa_notifier_bridge_info *info)
92 struct dsa_switch_tree *dst = ds->dst;
94 if (dst->index == info->tree_index && ds->index == info->sw_index &&
95 ds->ops->port_bridge_join)
96 return ds->ops->port_bridge_join(ds, info->port, info->br);
98 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
99 ds->ops->crosschip_bridge_join)
100 return ds->ops->crosschip_bridge_join(ds, info->tree_index,
102 info->port, info->br);
107 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
108 struct dsa_notifier_bridge_info *info)
110 bool unset_vlan_filtering = br_vlan_enabled(info->br);
111 struct dsa_switch_tree *dst = ds->dst;
112 struct netlink_ext_ack extack = {0};
115 if (dst->index == info->tree_index && ds->index == info->sw_index &&
116 ds->ops->port_bridge_join)
117 ds->ops->port_bridge_leave(ds, info->port, info->br);
119 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
120 ds->ops->crosschip_bridge_join)
121 ds->ops->crosschip_bridge_leave(ds, info->tree_index,
122 info->sw_index, info->port,
125 /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
126 * event for changing vlan_filtering setting upon slave ports leaving
127 * it. That is a good thing, because that lets us handle it and also
128 * handle the case where the switch's vlan_filtering setting is global
129 * (not per port). When that happens, the correct moment to trigger the
130 * vlan_filtering callback is only when the last port leaves the last
133 if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
134 for (port = 0; port < ds->num_ports; port++) {
135 struct net_device *bridge_dev;
137 bridge_dev = dsa_to_port(ds, port)->bridge_dev;
139 if (bridge_dev && br_vlan_enabled(bridge_dev)) {
140 unset_vlan_filtering = false;
145 if (unset_vlan_filtering) {
146 err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
149 dev_err(ds->dev, "port %d: %s\n", info->port,
151 if (err && err != EOPNOTSUPP)
157 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
158 * DSA links) that sit between the targeted port on which the notifier was
159 * emitted and its dedicated CPU port.
161 static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
162 int info_sw_index, int info_port)
164 struct dsa_port *targeted_dp, *cpu_dp;
165 struct dsa_switch *targeted_ds;
167 targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
168 targeted_dp = dsa_to_port(targeted_ds, info_port);
169 cpu_dp = targeted_dp->cpu_dp;
171 if (dsa_switch_is_upstream_of(ds, targeted_ds))
172 return port == dsa_towards_port(ds, cpu_dp->ds->index,
178 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
179 const unsigned char *addr,
182 struct dsa_mac_addr *a;
184 list_for_each_entry(a, addr_list, list)
185 if (ether_addr_equal(a->addr, addr) && a->vid == vid)
191 static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
192 const struct switchdev_obj_port_mdb *mdb)
194 struct dsa_port *dp = dsa_to_port(ds, port);
195 struct dsa_mac_addr *a;
198 /* No need to bother with refcounting for user ports */
199 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
200 return ds->ops->port_mdb_add(ds, port, mdb);
202 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
204 refcount_inc(&a->refcount);
208 a = kzalloc(sizeof(*a), GFP_KERNEL);
212 err = ds->ops->port_mdb_add(ds, port, mdb);
218 ether_addr_copy(a->addr, mdb->addr);
220 refcount_set(&a->refcount, 1);
221 list_add_tail(&a->list, &dp->mdbs);
226 static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
227 const struct switchdev_obj_port_mdb *mdb)
229 struct dsa_port *dp = dsa_to_port(ds, port);
230 struct dsa_mac_addr *a;
233 /* No need to bother with refcounting for user ports */
234 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
235 return ds->ops->port_mdb_del(ds, port, mdb);
237 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
241 if (!refcount_dec_and_test(&a->refcount))
244 err = ds->ops->port_mdb_del(ds, port, mdb);
246 refcount_inc(&a->refcount);
256 static int dsa_switch_fdb_add(struct dsa_switch *ds,
257 struct dsa_notifier_fdb_info *info)
259 int port = dsa_towards_port(ds, info->sw_index, info->port);
261 if (!ds->ops->port_fdb_add)
264 return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
267 static int dsa_switch_fdb_del(struct dsa_switch *ds,
268 struct dsa_notifier_fdb_info *info)
270 int port = dsa_towards_port(ds, info->sw_index, info->port);
272 if (!ds->ops->port_fdb_del)
275 return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
278 static int dsa_switch_hsr_join(struct dsa_switch *ds,
279 struct dsa_notifier_hsr_info *info)
281 if (ds->index == info->sw_index && ds->ops->port_hsr_join)
282 return ds->ops->port_hsr_join(ds, info->port, info->hsr);
287 static int dsa_switch_hsr_leave(struct dsa_switch *ds,
288 struct dsa_notifier_hsr_info *info)
290 if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
291 return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
296 static int dsa_switch_lag_change(struct dsa_switch *ds,
297 struct dsa_notifier_lag_info *info)
299 if (ds->index == info->sw_index && ds->ops->port_lag_change)
300 return ds->ops->port_lag_change(ds, info->port);
302 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
303 return ds->ops->crosschip_lag_change(ds, info->sw_index,
309 static int dsa_switch_lag_join(struct dsa_switch *ds,
310 struct dsa_notifier_lag_info *info)
312 if (ds->index == info->sw_index && ds->ops->port_lag_join)
313 return ds->ops->port_lag_join(ds, info->port, info->lag,
316 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
317 return ds->ops->crosschip_lag_join(ds, info->sw_index,
318 info->port, info->lag,
324 static int dsa_switch_lag_leave(struct dsa_switch *ds,
325 struct dsa_notifier_lag_info *info)
327 if (ds->index == info->sw_index && ds->ops->port_lag_leave)
328 return ds->ops->port_lag_leave(ds, info->port, info->lag);
330 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
331 return ds->ops->crosschip_lag_leave(ds, info->sw_index,
332 info->port, info->lag);
337 static int dsa_switch_mdb_add(struct dsa_switch *ds,
338 struct dsa_notifier_mdb_info *info)
340 int port = dsa_towards_port(ds, info->sw_index, info->port);
342 if (!ds->ops->port_mdb_add)
345 return dsa_switch_do_mdb_add(ds, port, info->mdb);
348 static int dsa_switch_mdb_del(struct dsa_switch *ds,
349 struct dsa_notifier_mdb_info *info)
351 int port = dsa_towards_port(ds, info->sw_index, info->port);
353 if (!ds->ops->port_mdb_del)
356 return dsa_switch_do_mdb_del(ds, port, info->mdb);
359 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
360 struct dsa_notifier_mdb_info *info)
365 if (!ds->ops->port_mdb_add)
368 for (port = 0; port < ds->num_ports; port++) {
369 if (dsa_switch_host_address_match(ds, port, info->sw_index,
371 err = dsa_switch_do_mdb_add(ds, port, info->mdb);
380 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
381 struct dsa_notifier_mdb_info *info)
386 if (!ds->ops->port_mdb_del)
389 for (port = 0; port < ds->num_ports; port++) {
390 if (dsa_switch_host_address_match(ds, port, info->sw_index,
392 err = dsa_switch_do_mdb_del(ds, port, info->mdb);
401 static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
402 struct dsa_notifier_vlan_info *info)
404 if (ds->index == info->sw_index && port == info->port)
407 if (dsa_is_dsa_port(ds, port))
413 static int dsa_switch_vlan_add(struct dsa_switch *ds,
414 struct dsa_notifier_vlan_info *info)
418 if (!ds->ops->port_vlan_add)
421 for (port = 0; port < ds->num_ports; port++) {
422 if (dsa_switch_vlan_match(ds, port, info)) {
423 err = ds->ops->port_vlan_add(ds, port, info->vlan,
433 static int dsa_switch_vlan_del(struct dsa_switch *ds,
434 struct dsa_notifier_vlan_info *info)
436 if (!ds->ops->port_vlan_del)
439 if (ds->index == info->sw_index)
440 return ds->ops->port_vlan_del(ds, info->port, info->vlan);
442 /* Do not deprogram the DSA links as they may be used as conduit
443 * for other VLAN members in the fabric.
448 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
449 struct dsa_notifier_tag_proto_info *info)
451 const struct dsa_device_ops *tag_ops = info->tag_ops;
454 if (!ds->ops->change_tag_protocol)
459 for (port = 0; port < ds->num_ports; port++) {
460 if (!dsa_is_cpu_port(ds, port))
463 err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
467 dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
470 /* Now that changing the tag protocol can no longer fail, let's update
471 * the remaining bits which are "duplicated for faster access", and the
472 * bits that depend on the tagger, such as the MTU.
474 for (port = 0; port < ds->num_ports; port++) {
475 if (dsa_is_user_port(ds, port)) {
476 struct net_device *slave;
478 slave = dsa_to_port(ds, port)->slave;
479 dsa_slave_setup_tagger(slave);
481 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
482 dsa_slave_change_mtu(slave, slave->mtu);
489 static int dsa_switch_mrp_add(struct dsa_switch *ds,
490 struct dsa_notifier_mrp_info *info)
492 if (!ds->ops->port_mrp_add)
495 if (ds->index == info->sw_index)
496 return ds->ops->port_mrp_add(ds, info->port, info->mrp);
501 static int dsa_switch_mrp_del(struct dsa_switch *ds,
502 struct dsa_notifier_mrp_info *info)
504 if (!ds->ops->port_mrp_del)
507 if (ds->index == info->sw_index)
508 return ds->ops->port_mrp_del(ds, info->port, info->mrp);
514 dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
515 struct dsa_notifier_mrp_ring_role_info *info)
517 if (!ds->ops->port_mrp_add)
520 if (ds->index == info->sw_index)
521 return ds->ops->port_mrp_add_ring_role(ds, info->port,
528 dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
529 struct dsa_notifier_mrp_ring_role_info *info)
531 if (!ds->ops->port_mrp_del)
534 if (ds->index == info->sw_index)
535 return ds->ops->port_mrp_del_ring_role(ds, info->port,
541 static int dsa_switch_event(struct notifier_block *nb,
542 unsigned long event, void *info)
544 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
548 case DSA_NOTIFIER_AGEING_TIME:
549 err = dsa_switch_ageing_time(ds, info);
551 case DSA_NOTIFIER_BRIDGE_JOIN:
552 err = dsa_switch_bridge_join(ds, info);
554 case DSA_NOTIFIER_BRIDGE_LEAVE:
555 err = dsa_switch_bridge_leave(ds, info);
557 case DSA_NOTIFIER_FDB_ADD:
558 err = dsa_switch_fdb_add(ds, info);
560 case DSA_NOTIFIER_FDB_DEL:
561 err = dsa_switch_fdb_del(ds, info);
563 case DSA_NOTIFIER_HSR_JOIN:
564 err = dsa_switch_hsr_join(ds, info);
566 case DSA_NOTIFIER_HSR_LEAVE:
567 err = dsa_switch_hsr_leave(ds, info);
569 case DSA_NOTIFIER_LAG_CHANGE:
570 err = dsa_switch_lag_change(ds, info);
572 case DSA_NOTIFIER_LAG_JOIN:
573 err = dsa_switch_lag_join(ds, info);
575 case DSA_NOTIFIER_LAG_LEAVE:
576 err = dsa_switch_lag_leave(ds, info);
578 case DSA_NOTIFIER_MDB_ADD:
579 err = dsa_switch_mdb_add(ds, info);
581 case DSA_NOTIFIER_MDB_DEL:
582 err = dsa_switch_mdb_del(ds, info);
584 case DSA_NOTIFIER_HOST_MDB_ADD:
585 err = dsa_switch_host_mdb_add(ds, info);
587 case DSA_NOTIFIER_HOST_MDB_DEL:
588 err = dsa_switch_host_mdb_del(ds, info);
590 case DSA_NOTIFIER_VLAN_ADD:
591 err = dsa_switch_vlan_add(ds, info);
593 case DSA_NOTIFIER_VLAN_DEL:
594 err = dsa_switch_vlan_del(ds, info);
596 case DSA_NOTIFIER_MTU:
597 err = dsa_switch_mtu(ds, info);
599 case DSA_NOTIFIER_TAG_PROTO:
600 err = dsa_switch_change_tag_proto(ds, info);
602 case DSA_NOTIFIER_MRP_ADD:
603 err = dsa_switch_mrp_add(ds, info);
605 case DSA_NOTIFIER_MRP_DEL:
606 err = dsa_switch_mrp_del(ds, info);
608 case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
609 err = dsa_switch_mrp_add_ring_role(ds, info);
611 case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
612 err = dsa_switch_mrp_del_ring_role(ds, info);
620 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
623 return notifier_from_errno(err);
626 int dsa_switch_register_notifier(struct dsa_switch *ds)
628 ds->nb.notifier_call = dsa_switch_event;
630 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
633 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
637 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
639 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);