1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

#include "dsa_priv.h"
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
22 for (i = 0; i < ds->num_ports; ++i) {
23 struct dsa_port *dp = dsa_to_port(ds, i);
25 if (dp->ageing_time && dp->ageing_time < ageing_time)
26 ageing_time = dp->ageing_time;
32 static int dsa_switch_ageing_time(struct dsa_switch *ds,
33 struct dsa_notifier_ageing_time_info *info)
35 unsigned int ageing_time = info->ageing_time;
37 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
40 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
43 /* Program the fastest ageing time in case of multiple bridges */
44 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
46 if (ds->ops->set_ageing_time)
47 return ds->ops->set_ageing_time(ds, ageing_time);
52 static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
53 struct dsa_notifier_mtu_info *info)
55 if (ds->index == info->sw_index)
56 return (port == info->port) || dsa_is_dsa_port(ds, port);
58 if (!info->propagate_upstream)
61 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
67 static int dsa_switch_mtu(struct dsa_switch *ds,
68 struct dsa_notifier_mtu_info *info)
72 if (!ds->ops->port_change_mtu)
75 for (port = 0; port < ds->num_ports; port++) {
76 if (dsa_switch_mtu_match(ds, port, info)) {
77 ret = ds->ops->port_change_mtu(ds, port, info->mtu);
86 static int dsa_switch_bridge_join(struct dsa_switch *ds,
87 struct dsa_notifier_bridge_info *info)
89 struct dsa_switch_tree *dst = ds->dst;
91 if (dst->index == info->tree_index && ds->index == info->sw_index &&
92 ds->ops->port_bridge_join)
93 return ds->ops->port_bridge_join(ds, info->port, info->br);
95 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
96 ds->ops->crosschip_bridge_join)
97 return ds->ops->crosschip_bridge_join(ds, info->tree_index,
99 info->port, info->br);
104 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
105 struct dsa_notifier_bridge_info *info)
107 bool unset_vlan_filtering = br_vlan_enabled(info->br);
108 struct dsa_switch_tree *dst = ds->dst;
111 if (dst->index == info->tree_index && ds->index == info->sw_index &&
112 ds->ops->port_bridge_join)
113 ds->ops->port_bridge_leave(ds, info->port, info->br);
115 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
116 ds->ops->crosschip_bridge_join)
117 ds->ops->crosschip_bridge_leave(ds, info->tree_index,
118 info->sw_index, info->port,
121 /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
122 * event for changing vlan_filtering setting upon slave ports leaving
123 * it. That is a good thing, because that lets us handle it and also
124 * handle the case where the switch's vlan_filtering setting is global
125 * (not per port). When that happens, the correct moment to trigger the
126 * vlan_filtering callback is only when the last port left this bridge.
128 if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
129 for (i = 0; i < ds->num_ports; i++) {
132 if (dsa_to_port(ds, i)->bridge_dev == info->br) {
133 unset_vlan_filtering = false;
138 if (unset_vlan_filtering) {
139 err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
141 if (err && err != EOPNOTSUPP)
147 static int dsa_switch_fdb_add(struct dsa_switch *ds,
148 struct dsa_notifier_fdb_info *info)
150 int port = dsa_towards_port(ds, info->sw_index, info->port);
152 if (!ds->ops->port_fdb_add)
155 return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
158 static int dsa_switch_fdb_del(struct dsa_switch *ds,
159 struct dsa_notifier_fdb_info *info)
161 int port = dsa_towards_port(ds, info->sw_index, info->port);
163 if (!ds->ops->port_fdb_del)
166 return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
169 static int dsa_switch_hsr_join(struct dsa_switch *ds,
170 struct dsa_notifier_hsr_info *info)
172 if (ds->index == info->sw_index && ds->ops->port_hsr_join)
173 return ds->ops->port_hsr_join(ds, info->port, info->hsr);
178 static int dsa_switch_hsr_leave(struct dsa_switch *ds,
179 struct dsa_notifier_hsr_info *info)
181 if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
182 return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
187 static int dsa_switch_lag_change(struct dsa_switch *ds,
188 struct dsa_notifier_lag_info *info)
190 if (ds->index == info->sw_index && ds->ops->port_lag_change)
191 return ds->ops->port_lag_change(ds, info->port);
193 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
194 return ds->ops->crosschip_lag_change(ds, info->sw_index,
200 static int dsa_switch_lag_join(struct dsa_switch *ds,
201 struct dsa_notifier_lag_info *info)
203 if (ds->index == info->sw_index && ds->ops->port_lag_join)
204 return ds->ops->port_lag_join(ds, info->port, info->lag,
207 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
208 return ds->ops->crosschip_lag_join(ds, info->sw_index,
209 info->port, info->lag,
215 static int dsa_switch_lag_leave(struct dsa_switch *ds,
216 struct dsa_notifier_lag_info *info)
218 if (ds->index == info->sw_index && ds->ops->port_lag_leave)
219 return ds->ops->port_lag_leave(ds, info->port, info->lag);
221 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
222 return ds->ops->crosschip_lag_leave(ds, info->sw_index,
223 info->port, info->lag);
228 static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
229 struct dsa_notifier_mdb_info *info)
231 if (ds->index == info->sw_index && port == info->port)
234 if (dsa_is_dsa_port(ds, port))
240 static int dsa_switch_mdb_add(struct dsa_switch *ds,
241 struct dsa_notifier_mdb_info *info)
246 if (!ds->ops->port_mdb_add)
249 for (port = 0; port < ds->num_ports; port++) {
250 if (dsa_switch_mdb_match(ds, port, info)) {
251 err = ds->ops->port_mdb_add(ds, port, info->mdb);
260 static int dsa_switch_mdb_del(struct dsa_switch *ds,
261 struct dsa_notifier_mdb_info *info)
263 if (!ds->ops->port_mdb_del)
266 if (ds->index == info->sw_index)
267 return ds->ops->port_mdb_del(ds, info->port, info->mdb);
272 static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
273 struct dsa_notifier_vlan_info *info)
275 if (ds->index == info->sw_index && port == info->port)
278 if (dsa_is_dsa_port(ds, port))
284 static int dsa_switch_vlan_add(struct dsa_switch *ds,
285 struct dsa_notifier_vlan_info *info)
289 if (!ds->ops->port_vlan_add)
292 for (port = 0; port < ds->num_ports; port++) {
293 if (dsa_switch_vlan_match(ds, port, info)) {
294 err = ds->ops->port_vlan_add(ds, port, info->vlan);
303 static int dsa_switch_vlan_del(struct dsa_switch *ds,
304 struct dsa_notifier_vlan_info *info)
306 if (!ds->ops->port_vlan_del)
309 if (ds->index == info->sw_index)
310 return ds->ops->port_vlan_del(ds, info->port, info->vlan);
312 /* Do not deprogram the DSA links as they may be used as conduit
313 * for other VLAN members in the fabric.
318 static bool dsa_switch_tag_proto_match(struct dsa_switch *ds, int port,
319 struct dsa_notifier_tag_proto_info *info)
321 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
327 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
328 struct dsa_notifier_tag_proto_info *info)
330 const struct dsa_device_ops *tag_ops = info->tag_ops;
333 if (!ds->ops->change_tag_protocol)
338 for (port = 0; port < ds->num_ports; port++) {
339 if (dsa_switch_tag_proto_match(ds, port, info)) {
340 err = ds->ops->change_tag_protocol(ds, port,
345 if (dsa_is_cpu_port(ds, port))
346 dsa_port_set_tag_protocol(dsa_to_port(ds, port),
351 /* Now that changing the tag protocol can no longer fail, let's update
352 * the remaining bits which are "duplicated for faster access", and the
353 * bits that depend on the tagger, such as the MTU.
355 for (port = 0; port < ds->num_ports; port++) {
356 if (dsa_is_user_port(ds, port)) {
357 struct net_device *slave;
359 slave = dsa_to_port(ds, port)->slave;
360 dsa_slave_setup_tagger(slave);
362 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
363 dsa_slave_change_mtu(slave, slave->mtu);
370 static int dsa_switch_event(struct notifier_block *nb,
371 unsigned long event, void *info)
373 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
377 case DSA_NOTIFIER_AGEING_TIME:
378 err = dsa_switch_ageing_time(ds, info);
380 case DSA_NOTIFIER_BRIDGE_JOIN:
381 err = dsa_switch_bridge_join(ds, info);
383 case DSA_NOTIFIER_BRIDGE_LEAVE:
384 err = dsa_switch_bridge_leave(ds, info);
386 case DSA_NOTIFIER_FDB_ADD:
387 err = dsa_switch_fdb_add(ds, info);
389 case DSA_NOTIFIER_FDB_DEL:
390 err = dsa_switch_fdb_del(ds, info);
392 case DSA_NOTIFIER_HSR_JOIN:
393 err = dsa_switch_hsr_join(ds, info);
395 case DSA_NOTIFIER_HSR_LEAVE:
396 err = dsa_switch_hsr_leave(ds, info);
398 case DSA_NOTIFIER_LAG_CHANGE:
399 err = dsa_switch_lag_change(ds, info);
401 case DSA_NOTIFIER_LAG_JOIN:
402 err = dsa_switch_lag_join(ds, info);
404 case DSA_NOTIFIER_LAG_LEAVE:
405 err = dsa_switch_lag_leave(ds, info);
407 case DSA_NOTIFIER_MDB_ADD:
408 err = dsa_switch_mdb_add(ds, info);
410 case DSA_NOTIFIER_MDB_DEL:
411 err = dsa_switch_mdb_del(ds, info);
413 case DSA_NOTIFIER_VLAN_ADD:
414 err = dsa_switch_vlan_add(ds, info);
416 case DSA_NOTIFIER_VLAN_DEL:
417 err = dsa_switch_vlan_del(ds, info);
419 case DSA_NOTIFIER_MTU:
420 err = dsa_switch_mtu(ds, info);
422 case DSA_NOTIFIER_TAG_PROTO:
423 err = dsa_switch_change_tag_proto(ds, info);
431 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
434 return notifier_from_errno(err);
437 int dsa_switch_register_notifier(struct dsa_switch *ds)
439 ds->nb.notifier_call = dsa_switch_event;
441 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
444 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
448 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
450 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);