/*
2 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
5 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
 */
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/list.h>
16 #include <linux/slab.h>
17 #include <linux/rtnetlink.h>
20 #include <linux/of_net.h>
/* Global list of all registered switch trees. */
23 static LIST_HEAD(dsa_switch_trees);
/* Serializes (un)registration of switches; taken by dsa_register_switch()
 * and dsa_unregister_switch() around all tree manipulation.
 */
24 static DEFINE_MUTEX(dsa2_mutex);
/* Look up the switch tree with id @tree on the global dsa_switch_trees
 * list, taking a reference on it when found.  NOTE(review): the return
 * paths are not visible in this extract; presumably returns the tree on
 * a match and NULL otherwise -- confirm against the full source.
 */
26 static struct dsa_switch_tree *dsa_get_dst(u32 tree)
28 	struct dsa_switch_tree *dst;
30 	list_for_each_entry(dst, &dsa_switch_trees, list)
31 		if (dst->tree == tree) {
32 			kref_get(&dst->refcount);
/* kref release callback: recover the tree from its embedded refcount.
 * Invoked when the last reference is dropped (see dsa_put_dst()).
 */
38 static void dsa_free_dst(struct kref *ref)
40 	struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
/* Drop a reference on @dst, freeing it via dsa_free_dst() on zero. */
47 static void dsa_put_dst(struct dsa_switch_tree *dst)
49 	kref_put(&dst->refcount, dsa_free_dst);
52 static struct dsa_switch_tree *dsa_add_dst(u32 tree)
54 struct dsa_switch_tree *dst;
56 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
60 INIT_LIST_HEAD(&dst->list);
61 list_add_tail(&dsa_switch_trees, &dst->list);
62 kref_init(&dst->refcount);
/* Record switch @ds at slot @index of tree @dst, taking a tree reference
 * that is dropped again by dsa_dst_del_ds().  (The ds[index] assignment
 * is not visible in this extract.)
 */
67 static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
68 			   struct dsa_switch *ds, u32 index)
70 	kref_get(&dst->refcount);
/* Remove switch @ds from slot @index of tree @dst and drop the reference
 * taken by dsa_dst_add_ds(); the tree is freed if this was the last one.
 */
74 static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
75 			   struct dsa_switch *ds, u32 index)
77 	dst->ds[index] = NULL;
78 	kref_put(&dst->refcount, dsa_free_dst);
/* True when this port slot is populated.  NOTE(review): body not visible
 * in this extract; presumably tests port->dn (set by dsa_parse_ports_dn)
 * -- confirm against the full source.
 */
81 static bool dsa_port_is_valid(struct dsa_port *port)
/* A DSA (switch-to-switch) port carries a "link" phandle in its DT node. */
86 static bool dsa_port_is_dsa(struct dsa_port *port)
88 	return !!of_parse_phandle(port->dn, "link", 0);
/* A CPU port carries an "ethernet" phandle (the master netdev) in DT. */
91 static bool dsa_port_is_cpu(struct dsa_port *port)
93 	return !!of_parse_phandle(port->dn, "ethernet", 0);
/* True if any port of @ds is backed by device tree node @port. */
96 static bool dsa_ds_find_port_dn(struct dsa_switch *ds,
97 				struct device_node *port)
101 	for (index = 0; index < DSA_MAX_PORTS; index++)
102 		if (ds->ports[index].dn == port)
/* Find the switch within tree @dst that owns the port described by device
 * tree node @port.  NOTE(review): return paths not visible in this
 * extract; presumably returns the switch or NULL.
 */
107 static struct dsa_switch *dsa_dst_find_port_dn(struct dsa_switch_tree *dst,
108 					       struct device_node *port)
110 	struct dsa_switch *ds;
113 	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
118 		if (dsa_ds_find_port_dn(ds, port))
/* Resolve every "link" phandle of DSA port @port on switch @src_ds: for
 * each link that points into a switch already known to the tree, record
 * the destination in src_ds' routing table.  (Loop exit, error handling
 * and the "not yet complete" return are elided in this extract.)
 */
125 static int dsa_port_complete(struct dsa_switch_tree *dst,
126 			     struct dsa_switch *src_ds,
127 			     struct dsa_port *port,
130 	struct device_node *link;
132 	struct dsa_switch *dst_ds;
134 	for (index = 0;; index++) {
135 		link = of_parse_phandle(port->dn, "link", index);
139 		dst_ds = dsa_dst_find_port_dn(dst, link);
/* Route to dst_ds goes out via this source port */
145 		src_ds->rtable[dst_ds->index] = src_port;
151 /* A switch is complete if all the DSA ports phandles point to ports
152  * known in the tree. A return value of 1 means the tree is not
153  * complete. This is not an error condition. A value of 0 is
 * complete.
 */
156 static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
158 	struct dsa_port *port;
/* Walk every valid DSA port and try to resolve its links */
162 	for (index = 0; index < DSA_MAX_PORTS; index++) {
163 		port = &ds->ports[index];
164 		if (!dsa_port_is_valid(port))
167 		if (!dsa_port_is_dsa(port))
170 		err = dsa_port_complete(dst, ds, port, index);
/* Port fully resolved: mark it as a DSA port on this switch */
174 		ds->dsa_port_mask |= BIT(index);
180 /* A tree is complete if all the DSA ports phandles point to ports
181  * known in the tree. A return value of 1 means the tree is not
182  * complete. This is not an error condition. A value of 0 is
 * complete.
 */
185 static int dsa_dst_complete(struct dsa_switch_tree *dst)
187 	struct dsa_switch *ds;
/* Every registered switch in the tree must itself be complete */
191 	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
196 		err = dsa_ds_complete(dst, ds);
/* Set up a DSA (inter-switch) port; warns and propagates errors from
 * dsa_cpu_dsa_setup() (fixed-link/PHY handling shared with CPU ports).
 */
204 static int dsa_dsa_port_apply(struct dsa_port *port, u32 index,
205 			      struct dsa_switch *ds)
209 	err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
211 		dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
/* Tear down a DSA port set up by dsa_dsa_port_apply(). */
219 static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index,
220 				 struct dsa_switch *ds)
222 	dsa_cpu_dsa_destroy(port);
/* Set up a CPU port and record it in ds->cpu_port_mask on success. */
225 static int dsa_cpu_port_apply(struct dsa_port *port, u32 index,
226 			      struct dsa_switch *ds)
230 	err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
232 		dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
237 	ds->cpu_port_mask |= BIT(index);
/* Tear down a CPU port and clear its bit in ds->cpu_port_mask. */
242 static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index,
243 				 struct dsa_switch *ds)
245 	dsa_cpu_dsa_destroy(port);
246 	ds->cpu_port_mask &= ~BIT(index);
/* Create the slave netdev for a user port, named after the optional DT
 * "label" property.  Warns and propagates dsa_slave_create() failures.
 */
250 static int dsa_user_port_apply(struct dsa_port *port, u32 index,
251 			       struct dsa_switch *ds)
256 	name = of_get_property(port->dn, "label", NULL);
260 	err = dsa_slave_create(ds, ds->dev, index, name);
262 		dev_warn(ds->dev, "Failed to create slave %d: %d\n",
/* Destroy the slave netdev of a user port, if one was created, and drop
 * the port from enabled_port_mask.
 */
270 static void dsa_user_port_unapply(struct dsa_port *port, u32 index,
271 				  struct dsa_switch *ds)
273 	if (ds->ports[index].netdev) {
274 		dsa_slave_destroy(ds->ports[index].netdev);
275 		ds->ports[index].netdev = NULL;
276 		ds->enabled_port_mask &= ~(1 << index);
/* Bring up one switch of the tree: run the driver's setup op, optionally
 * program the MAC address of the master netdev, register a slave MDIO
 * bus when the driver exposes PHY access, then apply every valid port
 * according to its flavor (DSA, CPU, or user).  Error-path lines are
 * elided in this extract.
 */
280 static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
282 	struct dsa_port *port;
286 	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
287 	 * driver and before ops->setup() has run, since the switch drivers and
288 	 * the slave MDIO bus driver rely on these values for probing PHY
291 	ds->phys_mii_mask = ds->enabled_port_mask;
293 	err = ds->ops->setup(ds);
/* set_addr is optional; only call it when the driver provides it */
297 	if (ds->ops->set_addr) {
298 		err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
/* Drivers with phy_read get a dedicated slave MDIO bus, unless they
 * already registered one themselves.
 */
303 	if (!ds->slave_mii_bus && ds->ops->phy_read) {
304 		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
305 		if (!ds->slave_mii_bus)
308 		dsa_slave_mii_bus_init(ds);
310 		err = mdiobus_register(ds->slave_mii_bus);
/* Apply each populated port; DSA and CPU ports take their own paths,
 * everything else is a user port.
 */
315 	for (index = 0; index < DSA_MAX_PORTS; index++) {
316 		port = &ds->ports[index];
317 		if (!dsa_port_is_valid(port))
320 		if (dsa_port_is_dsa(port)) {
321 			err = dsa_dsa_port_apply(port, index, ds);
327 		if (dsa_port_is_cpu(port)) {
328 			err = dsa_cpu_port_apply(port, index, ds);
334 		err = dsa_user_port_apply(port, index, ds);
/* Tear down one switch: unapply every valid port (mirror of
 * dsa_ds_apply()), then unregister the slave MDIO bus if one was
 * registered for this driver.
 */
342 static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
344 	struct dsa_port *port;
347 	for (index = 0; index < DSA_MAX_PORTS; index++) {
348 		port = &ds->ports[index];
349 		if (!dsa_port_is_valid(port))
352 		if (dsa_port_is_dsa(port)) {
353 			dsa_dsa_port_unapply(port, index, ds);
357 		if (dsa_port_is_cpu(port)) {
358 			dsa_cpu_port_unapply(port, index, ds);
362 		dsa_user_port_unapply(port, index, ds);
/* Same condition as the registration in dsa_ds_apply() */
365 	if (ds->slave_mii_bus && ds->ops->phy_read)
366 		mdiobus_unregister(ds->slave_mii_bus);
/* Bring up the whole tree: apply every registered switch, set up ethtool
 * ops on the CPU switch, then point the master netdev's dsa_ptr at the
 * tree so ingress frames are steered to the tagger's receive function.
 */
369 static int dsa_dst_apply(struct dsa_switch_tree *dst)
371 	struct dsa_switch *ds;
375 	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
380 		err = dsa_ds_apply(dst, ds);
385 	if (dst->cpu_switch) {
386 		err = dsa_cpu_port_ethtool_setup(dst->cpu_switch);
391 	/* If we use a tagging format that doesn't have an ethertype
392 	 * field, make sure that all packets from this point on get
393 	 * sent to the tag format's receive function.
396 	dst->master_netdev->dsa_ptr = (void *)dst;
/* Tear down the whole tree: detach the master netdev first so no more
 * frames are steered to the tagger, then unapply every switch, restore
 * the CPU port's ethtool ops, and clear the applied flag.
 */
402 static void dsa_dst_unapply(struct dsa_switch_tree *dst)
404 	struct dsa_switch *ds;
410 	dst->master_netdev->dsa_ptr = NULL;
412 	/* If we used a tagging format that doesn't have an ethertype
413 	 * field, make sure that all packets from this point get sent
414 	 * without the tag and go through the regular receive path.
418 	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
423 		dsa_ds_unapply(dst, ds);
427 		dsa_cpu_port_ethtool_restore(dst->cpu_switch);
429 	pr_info("DSA: tree %d unapplied\n", dst->tree);
430 	dst->applied = false;
/* Parse a CPU port: resolve its "ethernet" phandle to the master netdev
 * (deferring probe when the netdev is not yet available), record the
 * first master/CPU switch/CPU port seen for the tree, and resolve the
 * tagging protocol the switch driver requests.
 */
433 static int dsa_cpu_parse(struct dsa_port *port, u32 index,
434 			 struct dsa_switch_tree *dst,
435 			 struct dsa_switch *ds)
437 	enum dsa_tag_protocol tag_protocol;
438 	struct net_device *ethernet_dev;
439 	struct device_node *ethernet;
441 	ethernet = of_parse_phandle(port->dn, "ethernet", 0);
445 	ethernet_dev = of_find_net_device_by_node(ethernet);
/* Master netdev not registered yet: retry this probe later */
447 		return -EPROBE_DEFER;
449 	if (!ds->master_netdev)
450 		ds->master_netdev = ethernet_dev;
452 	if (!dst->master_netdev)
453 		dst->master_netdev = ethernet_dev;
/* First CPU port parsed becomes the tree's CPU switch/port */
455 	if (!dst->cpu_switch) {
456 		dst->cpu_switch = ds;
457 		dst->cpu_port = index;
460 	tag_protocol = ds->ops->get_tag_protocol(ds);
461 	dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
462 	if (IS_ERR(dst->tag_ops)) {
463 		dev_warn(ds->dev, "No tagger for this switch\n");
464 		return PTR_ERR(dst->tag_ops);
/* Cache the tagger's receive hook on the tree for the hot path */
467 	dst->rcv = dst->tag_ops->rcv;
/* Parse one switch of the tree: run dsa_cpu_parse() on each valid CPU
 * port, then log that the switch was parsed.
 */
472 static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
474 	struct dsa_port *port;
478 	for (index = 0; index < DSA_MAX_PORTS; index++) {
479 		port = &ds->ports[index];
480 		if (!dsa_port_is_valid(port))
483 		if (dsa_port_is_cpu(port)) {
484 			err = dsa_cpu_parse(port, index, dst, ds);
490 	pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
/* Parse every switch of the tree; a tree without a master netdev after
 * parsing is rejected (error value elided in this extract).
 */
495 static int dsa_dst_parse(struct dsa_switch_tree *dst)
497 	struct dsa_switch *ds;
501 	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
506 		err = dsa_ds_parse(dst, ds);
511 	if (!dst->master_netdev) {
512 		pr_warn("Tree has no master device\n");
516 	pr_info("DSA: tree %d parsed\n", dst->tree);
521 static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
523 struct device_node *port;
527 for_each_available_child_of_node(ports, port) {
528 err = of_property_read_u32(port, "reg", ®);
532 if (reg >= DSA_MAX_PORTS)
535 ds->ports[reg].dn = port;
537 /* Initialize enabled_port_mask now for ops->setup()
538 * to have access to a correct value, just like what
539 * net/dsa/dsa.c::dsa_switch_setup_one does.
541 if (!dsa_port_is_cpu(&ds->ports[reg]))
542 ds->enabled_port_mask |= 1 << reg;
/* Read the optional "dsa,member" = <tree index> property from @np into
 * *tree and *index; an index >= DSA_MAX_SWITCHES is rejected (error
 * value elided in this extract).
 */
548 static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index)
554 	err = of_property_read_u32_index(np, "dsa,member", 0, tree);
556 	/* Does not exist, but it is optional */
562 	err = of_property_read_u32_index(np, "dsa,member", 1, index);
566 	if (*index >= DSA_MAX_SWITCHES)
/* Return the mandatory "ports" child node of the switch's DT node, or
 * ERR_PTR(-EINVAL) when it is missing.
 */
572 static struct device_node *dsa_get_ports(struct dsa_switch *ds,
573 					 struct device_node *np)
575 	struct device_node *ports;
577 	ports = of_get_child_by_name(np, "ports");
579 		dev_err(ds->dev, "no ports child node found\n");
580 		return ERR_PTR(-EINVAL);
/* Core of switch registration (dsa2_mutex held by the caller): parse the
 * switch's DT (tree membership and ports), find or create its tree, add
 * the switch to it, and once all member switches have registered, parse
 * and apply the whole tree.  A -EPROBE_DEFER from parsing removes the
 * switch again so registration can be retried.  Many error-handling and
 * unwind lines are elided in this extract.
 */
586 static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev)
588 	struct device_node *np = dev->of_node;
589 	struct dsa_switch_tree *dst;
590 	struct device_node *ports;
594 	err = dsa_parse_member_dn(np, &tree, &index);
598 	ports = dsa_get_ports(ds, np);
600 		return PTR_ERR(ports);
602 	err = dsa_parse_ports_dn(ports, ds);
/* Join an existing tree or create a new one for this tree id */
606 	dst = dsa_get_dst(tree);
608 		dst = dsa_add_dst(tree);
/* Slot already taken: two switches claim the same tree/index */
613 	if (dst->ds[index]) {
621 	/* Initialize the routing table */
622 	for (i = 0; i < DSA_MAX_SWITCHES; ++i)
623 		ds->rtable[i] = DSA_RTABLE_NONE;
625 	dsa_dst_add_ds(dst, ds, index);
627 	err = dsa_dst_complete(dst);
632 	/* Not all switches registered yet */
638 	pr_info("DSA: Disjoint trees?\n");
642 	err = dsa_dst_parse(dst);
644 	if (err == -EPROBE_DEFER) {
645 		dsa_dst_del_ds(dst, ds, ds->index);
652 	err = dsa_dst_apply(dst);
654 		dsa_dst_unapply(dst);
662 	dsa_dst_del_ds(dst, ds, ds->index);
/* Allocate a zeroed dsa_switch with room for @n trailing dsa_port slots,
 * device-managed (freed automatically with @dev).
 */
669 struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
671 	size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port);
672 	struct dsa_switch *ds;
674 	ds = devm_kzalloc(dev, size, GFP_KERNEL);
683 EXPORT_SYMBOL_GPL(dsa_switch_alloc);
/* Public entry point: register @ds under dsa2_mutex, which serializes
 * all tree/list manipulation done by _dsa_register_switch().
 */
685 int dsa_register_switch(struct dsa_switch *ds, struct device *dev)
689 	mutex_lock(&dsa2_mutex);
690 	err = _dsa_register_switch(ds, dev);
691 	mutex_unlock(&dsa2_mutex);
695 EXPORT_SYMBOL_GPL(dsa_register_switch);
/* Core of unregistration (dsa2_mutex held): unapply the whole tree, then
 * drop this switch (and its tree reference) from it.
 */
697 static void _dsa_unregister_switch(struct dsa_switch *ds)
699 	struct dsa_switch_tree *dst = ds->dst;
701 	dsa_dst_unapply(dst);
703 	dsa_dst_del_ds(dst, ds, ds->index);
/* Public entry point: unregister @ds under dsa2_mutex (mirror of
 * dsa_register_switch()).
 */
706 void dsa_unregister_switch(struct dsa_switch *ds)
708 	mutex_lock(&dsa2_mutex);
709 	_dsa_unregister_switch(ds);
710 	mutex_unlock(&dsa2_mutex);
712 EXPORT_SYMBOL_GPL(dsa_unregister_switch);