1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
6  * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
7  */
8
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <net/devlink.h>
18
19 #include "dsa_priv.h"
20
21 static DEFINE_MUTEX(dsa2_mutex);
22 LIST_HEAD(dsa_tree_list);
23
24 /**
25  * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
26  * @dst: collection of struct dsa_switch devices to notify.
27  * @e: event, must be of type DSA_NOTIFIER_*
28  * @v: event-specific value.
29  *
30  * Given a struct dsa_switch_tree, this can be used to run a function once for
31  * each member DSA switch. The only other way to traverse the tree is through
32  * its ports list, which does not uniquely list the switches.
33  */
34 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
35 {
36         struct raw_notifier_head *nh = &dst->nh;
37         int err;
38
39         err = raw_notifier_call_chain(nh, e, v);
40
41         return notifier_to_errno(err);
42 }
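/* Illustrative usage sketch: a caller typically fills an event-specific info
 * structure and passes it down the tree, as dsa_tree_change_tag_proto() does
 * later in this file:
 *
 *	struct dsa_notifier_tag_proto_info info = { .tag_ops = tag_ops };
 *
 *	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
 */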
43
44 /**
45  * dsa_broadcast - Notify all DSA trees in the system.
46  * @e: event, must be of type DSA_NOTIFIER_*
47  * @v: event-specific value.
48  *
49  * Can be used to notify the switching fabric of events such as cross-chip
50  * bridging between disjoint trees (such as islands of tagger-compatible
51  * switches bridged by an incompatible middle switch).
52  */
53 int dsa_broadcast(unsigned long e, void *v)
54 {
55         struct dsa_switch_tree *dst;
56         int err = 0;
57
58         list_for_each_entry(dst, &dsa_tree_list, list) {
59                 err = dsa_tree_notify(dst, e, v);
60                 if (err)
61                         break;
62         }
63
64         return err;
65 }
66
67 /**
68  * dsa_lag_map() - Map LAG netdev to a linear LAG ID
69  * @dst: Tree in which to record the mapping.
70  * @lag: Netdev that is to be mapped to an ID.
71  *
72  * dsa_lag_id/dsa_lag_dev can then be used to translate between the
73  * two spaces. The size of the mapping space is determined by the
74  * driver by setting ds->num_lag_ids. It is perfectly legal to leave
75  * it unset if it is not needed, in which case these functions become
76  * no-ops.
77  */
78 void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
79 {
80         unsigned int id;
81
82         if (dsa_lag_id(dst, lag) >= 0)
83                 /* Already mapped */
84                 return;
85
86         for (id = 0; id < dst->lags_len; id++) {
87                 if (!dsa_lag_dev(dst, id)) {
88                         dst->lags[id] = lag;
89                         return;
90                 }
91         }
92
93         /* No IDs left, which is OK. Some drivers do not need it. The
94          * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
95          * returns an error for this device when joining the LAG. The
96          * driver can then return -EOPNOTSUPP back to DSA, which will
97          * fall back to a software LAG.
98          */
99 }
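/* A minimal sketch of the expected pairing, assuming a caller in the LAG
 * join/leave path (lag_dev and the surrounding error handling are
 * illustrative):
 *
 *	dsa_lag_map(dst, lag_dev);		// join: reserve a linear ID
 *	id = dsa_lag_id(dst, lag_dev);		// netdev -> linear ID
 *	...
 *	dsa_lag_unmap(dst, lag_dev);		// leave: dropped with last user
 */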
100
101 /**
102  * dsa_lag_unmap() - Remove a LAG ID mapping
103  * @dst: Tree in which the mapping is recorded.
104  * @lag: Netdev that was mapped.
105  *
106  * As there may be multiple users of the mapping, it is only removed
107  * if there are no other references to it.
108  */
109 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
110 {
111         struct dsa_port *dp;
112         unsigned int id;
113
114         dsa_lag_foreach_port(dp, dst, lag)
115                 /* There are remaining users of this mapping */
116                 return;
117
118         dsa_lags_foreach_id(id, dst) {
119                 if (dsa_lag_dev(dst, id) == lag) {
120                         dst->lags[id] = NULL;
121                         break;
122                 }
123         }
124 }
125
126 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
127 {
128         struct dsa_switch_tree *dst;
129         struct dsa_port *dp;
130
131         list_for_each_entry(dst, &dsa_tree_list, list) {
132                 if (dst->index != tree_index)
133                         continue;
134
135                 list_for_each_entry(dp, &dst->ports, list) {
136                         if (dp->ds->index != sw_index)
137                                 continue;
138
139                         return dp->ds;
140                 }
141         }
142
143         return NULL;
144 }
145 EXPORT_SYMBOL_GPL(dsa_switch_find);
146
147 static struct dsa_switch_tree *dsa_tree_find(int index)
148 {
149         struct dsa_switch_tree *dst;
150
151         list_for_each_entry(dst, &dsa_tree_list, list)
152                 if (dst->index == index)
153                         return dst;
154
155         return NULL;
156 }
157
158 static struct dsa_switch_tree *dsa_tree_alloc(int index)
159 {
160         struct dsa_switch_tree *dst;
161
162         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
163         if (!dst)
164                 return NULL;
165
166         dst->index = index;
167
168         INIT_LIST_HEAD(&dst->rtable);
169
170         INIT_LIST_HEAD(&dst->ports);
171
172         INIT_LIST_HEAD(&dst->list);
173         list_add_tail(&dst->list, &dsa_tree_list);
174
175         kref_init(&dst->refcount);
176
177         return dst;
178 }
179
180 static void dsa_tree_free(struct dsa_switch_tree *dst)
181 {
182         if (dst->tag_ops)
183                 dsa_tag_driver_put(dst->tag_ops);
184         list_del(&dst->list);
185         kfree(dst);
186 }
187
188 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
189 {
190         if (dst)
191                 kref_get(&dst->refcount);
192
193         return dst;
194 }
195
196 static struct dsa_switch_tree *dsa_tree_touch(int index)
197 {
198         struct dsa_switch_tree *dst;
199
200         dst = dsa_tree_find(index);
201         if (dst)
202                 return dsa_tree_get(dst);
203         else
204                 return dsa_tree_alloc(index);
205 }
206
207 static void dsa_tree_release(struct kref *ref)
208 {
209         struct dsa_switch_tree *dst;
210
211         dst = container_of(ref, struct dsa_switch_tree, refcount);
212
213         dsa_tree_free(dst);
214 }
215
216 static void dsa_tree_put(struct dsa_switch_tree *dst)
217 {
218         if (dst)
219                 kref_put(&dst->refcount, dsa_tree_release);
220 }
221
222 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
223                                                    struct device_node *dn)
224 {
225         struct dsa_port *dp;
226
227         list_for_each_entry(dp, &dst->ports, list)
228                 if (dp->dn == dn)
229                         return dp;
230
231         return NULL;
232 }
233
234 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
235                                        struct dsa_port *link_dp)
236 {
237         struct dsa_switch *ds = dp->ds;
238         struct dsa_switch_tree *dst;
239         struct dsa_link *dl;
240
241         dst = ds->dst;
242
243         list_for_each_entry(dl, &dst->rtable, list)
244                 if (dl->dp == dp && dl->link_dp == link_dp)
245                         return dl;
246
247         dl = kzalloc(sizeof(*dl), GFP_KERNEL);
248         if (!dl)
249                 return NULL;
250
251         dl->dp = dp;
252         dl->link_dp = link_dp;
253
254         INIT_LIST_HEAD(&dl->list);
255         list_add_tail(&dl->list, &dst->rtable);
256
257         return dl;
258 }
259
260 static bool dsa_port_setup_routing_table(struct dsa_port *dp)
261 {
262         struct dsa_switch *ds = dp->ds;
263         struct dsa_switch_tree *dst = ds->dst;
264         struct device_node *dn = dp->dn;
265         struct of_phandle_iterator it;
266         struct dsa_port *link_dp;
267         struct dsa_link *dl;
268         int err;
269
270         of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
271                 link_dp = dsa_tree_find_port_by_node(dst, it.node);
272                 if (!link_dp) {
273                         of_node_put(it.node);
274                         return false;
275                 }
276
277                 dl = dsa_link_touch(dp, link_dp);
278                 if (!dl) {
279                         of_node_put(it.node);
280                         return false;
281                 }
282         }
283
284         return true;
285 }
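/* A minimal device tree sketch of the "link" phandle walked above, for a
 * DSA port wired to another switch (node names are illustrative):
 *
 *	switch0port5: port@5 {
 *		reg = <5>;
 *		link = <&switch1port6>;
 *	};
 */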
286
287 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
288 {
289         bool complete = true;
290         struct dsa_port *dp;
291
292         list_for_each_entry(dp, &dst->ports, list) {
293                 if (dsa_port_is_dsa(dp)) {
294                         complete = dsa_port_setup_routing_table(dp);
295                         if (!complete)
296                                 break;
297                 }
298         }
299
300         return complete;
301 }
302
303 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
304 {
305         struct dsa_port *dp;
306
307         list_for_each_entry(dp, &dst->ports, list)
308                 if (dsa_port_is_cpu(dp))
309                         return dp;
310
311         return NULL;
312 }
313
314 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
315 {
316         struct dsa_port *cpu_dp, *dp;
317
318         cpu_dp = dsa_tree_find_first_cpu(dst);
319         if (!cpu_dp) {
320                 pr_err("DSA: tree %d has no CPU port\n", dst->index);
321                 return -EINVAL;
322         }
323
324         /* Assign the default CPU port to all ports of the fabric */
325         list_for_each_entry(dp, &dst->ports, list)
326                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
327                         dp->cpu_dp = cpu_dp;
328
329         return 0;
330 }
331
332 static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
333 {
334         struct dsa_port *dp;
335
336         list_for_each_entry(dp, &dst->ports, list)
337                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
338                         dp->cpu_dp = NULL;
339 }
340
341 static int dsa_port_setup(struct dsa_port *dp)
342 {
343         struct devlink_port *dlp = &dp->devlink_port;
344         bool dsa_port_link_registered = false;
345         bool dsa_port_enabled = false;
346         int err = 0;
347
348         if (dp->setup)
349                 return 0;
350
351         INIT_LIST_HEAD(&dp->fdbs);
352         INIT_LIST_HEAD(&dp->mdbs);
353
354         switch (dp->type) {
355         case DSA_PORT_TYPE_UNUSED:
356                 dsa_port_disable(dp);
357                 break;
358         case DSA_PORT_TYPE_CPU:
359                 err = dsa_port_link_register_of(dp);
360                 if (err)
361                         break;
362                 dsa_port_link_registered = true;
363
364                 err = dsa_port_enable(dp, NULL);
365                 if (err)
366                         break;
367                 dsa_port_enabled = true;
368
369                 break;
370         case DSA_PORT_TYPE_DSA:
371                 err = dsa_port_link_register_of(dp);
372                 if (err)
373                         break;
374                 dsa_port_link_registered = true;
375
376                 err = dsa_port_enable(dp, NULL);
377                 if (err)
378                         break;
379                 dsa_port_enabled = true;
380
381                 break;
382         case DSA_PORT_TYPE_USER:
383                 of_get_mac_address(dp->dn, dp->mac);
384                 err = dsa_slave_create(dp);
385                 if (err)
386                         break;
387
388                 devlink_port_type_eth_set(dlp, dp->slave);
389                 break;
390         }
391
392         if (err && dsa_port_enabled)
393                 dsa_port_disable(dp);
394         if (err && dsa_port_link_registered)
395                 dsa_port_link_unregister_of(dp);
396         if (err)
397                 return err;
398
399         dp->setup = true;
400
401         return 0;
402 }
403
404 static int dsa_port_devlink_setup(struct dsa_port *dp)
405 {
406         struct devlink_port *dlp = &dp->devlink_port;
407         struct dsa_switch_tree *dst = dp->ds->dst;
408         struct devlink_port_attrs attrs = {};
409         struct devlink *dl = dp->ds->devlink;
410         const unsigned char *id;
411         unsigned char len;
412         int err;
413
414         id = (const unsigned char *)&dst->index;
415         len = sizeof(dst->index);
416
417         attrs.phys.port_number = dp->index;
418         memcpy(attrs.switch_id.id, id, len);
419         attrs.switch_id.id_len = len;
420         memset(dlp, 0, sizeof(*dlp));
421
422         switch (dp->type) {
423         case DSA_PORT_TYPE_UNUSED:
424                 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
425                 break;
426         case DSA_PORT_TYPE_CPU:
427                 attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
428                 break;
429         case DSA_PORT_TYPE_DSA:
430                 attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
431                 break;
432         case DSA_PORT_TYPE_USER:
433                 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
434                 break;
435         }
436
437         devlink_port_attrs_set(dlp, &attrs);
438         err = devlink_port_register(dl, dlp, dp->index);
439
440         if (!err)
441                 dp->devlink_port_setup = true;
442
443         return err;
444 }
445
446 static void dsa_port_teardown(struct dsa_port *dp)
447 {
448         struct devlink_port *dlp = &dp->devlink_port;
449         struct dsa_mac_addr *a, *tmp;
450
451         if (!dp->setup)
452                 return;
453
454         devlink_port_type_clear(dlp);
455
456         switch (dp->type) {
457         case DSA_PORT_TYPE_UNUSED:
458                 break;
459         case DSA_PORT_TYPE_CPU:
460                 dsa_port_disable(dp);
461                 dsa_port_link_unregister_of(dp);
462                 break;
463         case DSA_PORT_TYPE_DSA:
464                 dsa_port_disable(dp);
465                 dsa_port_link_unregister_of(dp);
466                 break;
467         case DSA_PORT_TYPE_USER:
468                 if (dp->slave) {
469                         dsa_slave_destroy(dp->slave);
470                         dp->slave = NULL;
471                 }
472                 break;
473         }
474
475         list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
476                 list_del(&a->list);
477                 kfree(a);
478         }
479
480         list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
481                 list_del(&a->list);
482                 kfree(a);
483         }
484
485         dp->setup = false;
486 }
487
488 static void dsa_port_devlink_teardown(struct dsa_port *dp)
489 {
490         struct devlink_port *dlp = &dp->devlink_port;
491
492         if (dp->devlink_port_setup)
493                 devlink_port_unregister(dlp);
494         dp->devlink_port_setup = false;
495 }
496
497 static int dsa_devlink_info_get(struct devlink *dl,
498                                 struct devlink_info_req *req,
499                                 struct netlink_ext_ack *extack)
500 {
501         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
502
503         if (ds->ops->devlink_info_get)
504                 return ds->ops->devlink_info_get(ds, req, extack);
505
506         return -EOPNOTSUPP;
507 }
508
509 static int dsa_devlink_sb_pool_get(struct devlink *dl,
510                                    unsigned int sb_index, u16 pool_index,
511                                    struct devlink_sb_pool_info *pool_info)
512 {
513         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
514
515         if (!ds->ops->devlink_sb_pool_get)
516                 return -EOPNOTSUPP;
517
518         return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
519                                             pool_info);
520 }
521
522 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
523                                    u16 pool_index, u32 size,
524                                    enum devlink_sb_threshold_type threshold_type,
525                                    struct netlink_ext_ack *extack)
526 {
527         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
528
529         if (!ds->ops->devlink_sb_pool_set)
530                 return -EOPNOTSUPP;
531
532         return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
533                                             threshold_type, extack);
534 }
535
536 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
537                                         unsigned int sb_index, u16 pool_index,
538                                         u32 *p_threshold)
539 {
540         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
541         int port = dsa_devlink_port_to_port(dlp);
542
543         if (!ds->ops->devlink_sb_port_pool_get)
544                 return -EOPNOTSUPP;
545
546         return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
547                                                  pool_index, p_threshold);
548 }
549
550 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
551                                         unsigned int sb_index, u16 pool_index,
552                                         u32 threshold,
553                                         struct netlink_ext_ack *extack)
554 {
555         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
556         int port = dsa_devlink_port_to_port(dlp);
557
558         if (!ds->ops->devlink_sb_port_pool_set)
559                 return -EOPNOTSUPP;
560
561         return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
562                                                  pool_index, threshold, extack);
563 }
564
565 static int
566 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
567                                 unsigned int sb_index, u16 tc_index,
568                                 enum devlink_sb_pool_type pool_type,
569                                 u16 *p_pool_index, u32 *p_threshold)
570 {
571         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
572         int port = dsa_devlink_port_to_port(dlp);
573
574         if (!ds->ops->devlink_sb_tc_pool_bind_get)
575                 return -EOPNOTSUPP;
576
577         return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
578                                                     tc_index, pool_type,
579                                                     p_pool_index, p_threshold);
580 }
581
582 static int
583 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
584                                 unsigned int sb_index, u16 tc_index,
585                                 enum devlink_sb_pool_type pool_type,
586                                 u16 pool_index, u32 threshold,
587                                 struct netlink_ext_ack *extack)
588 {
589         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
590         int port = dsa_devlink_port_to_port(dlp);
591
592         if (!ds->ops->devlink_sb_tc_pool_bind_set)
593                 return -EOPNOTSUPP;
594
595         return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
596                                                     tc_index, pool_type,
597                                                     pool_index, threshold,
598                                                     extack);
599 }
600
601 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
602                                        unsigned int sb_index)
603 {
604         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
605
606         if (!ds->ops->devlink_sb_occ_snapshot)
607                 return -EOPNOTSUPP;
608
609         return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
610 }
611
612 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
613                                         unsigned int sb_index)
614 {
615         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
616
617         if (!ds->ops->devlink_sb_occ_max_clear)
618                 return -EOPNOTSUPP;
619
620         return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
621 }
622
623 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
624                                             unsigned int sb_index,
625                                             u16 pool_index, u32 *p_cur,
626                                             u32 *p_max)
627 {
628         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
629         int port = dsa_devlink_port_to_port(dlp);
630
631         if (!ds->ops->devlink_sb_occ_port_pool_get)
632                 return -EOPNOTSUPP;
633
634         return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
635                                                      pool_index, p_cur, p_max);
636 }
637
638 static int
639 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
640                                     unsigned int sb_index, u16 tc_index,
641                                     enum devlink_sb_pool_type pool_type,
642                                     u32 *p_cur, u32 *p_max)
643 {
644         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
645         int port = dsa_devlink_port_to_port(dlp);
646
647         if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
648                 return -EOPNOTSUPP;
649
650         return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
651                                                         sb_index, tc_index,
652                                                         pool_type, p_cur,
653                                                         p_max);
654 }
655
656 static const struct devlink_ops dsa_devlink_ops = {
657         .info_get                       = dsa_devlink_info_get,
658         .sb_pool_get                    = dsa_devlink_sb_pool_get,
659         .sb_pool_set                    = dsa_devlink_sb_pool_set,
660         .sb_port_pool_get               = dsa_devlink_sb_port_pool_get,
661         .sb_port_pool_set               = dsa_devlink_sb_port_pool_set,
662         .sb_tc_pool_bind_get            = dsa_devlink_sb_tc_pool_bind_get,
663         .sb_tc_pool_bind_set            = dsa_devlink_sb_tc_pool_bind_set,
664         .sb_occ_snapshot                = dsa_devlink_sb_occ_snapshot,
665         .sb_occ_max_clear               = dsa_devlink_sb_occ_max_clear,
666         .sb_occ_port_pool_get           = dsa_devlink_sb_occ_port_pool_get,
667         .sb_occ_tc_port_bind_get        = dsa_devlink_sb_occ_tc_port_bind_get,
668 };
669
670 static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
671 {
672         const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
673         struct dsa_switch_tree *dst = ds->dst;
674         int port, err;
675
676         if (tag_ops->proto == dst->default_proto)
677                 return 0;
678
679         for (port = 0; port < ds->num_ports; port++) {
680                 if (!dsa_is_cpu_port(ds, port))
681                         continue;
682
683                 err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
684                 if (err) {
685                         dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
686                                 tag_ops->name, ERR_PTR(err));
687                         return err;
688                 }
689         }
690
691         return 0;
692 }
693
694 static int dsa_switch_setup(struct dsa_switch *ds)
695 {
696         struct dsa_devlink_priv *dl_priv;
697         struct dsa_port *dp;
698         int err;
699
700         if (ds->setup)
701                 return 0;
702
703         /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
704          * driver and before ops->setup() has run, since the switch drivers and
705          * the slave MDIO bus driver rely on these values to decide whether or
706          * not to probe PHY devices.
707          */
708         ds->phys_mii_mask |= dsa_user_ports(ds);
709
710         /* Add the switch to devlink before calling setup, so that setup can
711          * add dpipe tables
712          */
713         ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
714         if (!ds->devlink)
715                 return -ENOMEM;
716         dl_priv = devlink_priv(ds->devlink);
717         dl_priv->ds = ds;
718
719         err = devlink_register(ds->devlink, ds->dev);
720         if (err)
721                 goto free_devlink;
722
723         /* Set up devlink port instances now, so that the switch
724          * setup() can register regions, etc., against the ports
725          */
726         list_for_each_entry(dp, &ds->dst->ports, list) {
727                 if (dp->ds == ds) {
728                         err = dsa_port_devlink_setup(dp);
729                         if (err)
730                                 goto unregister_devlink_ports;
731                 }
732         }
733
734         err = dsa_switch_register_notifier(ds);
735         if (err)
736                 goto unregister_devlink_ports;
737
738         ds->configure_vlan_while_not_filtering = true;
739
740         err = ds->ops->setup(ds);
741         if (err < 0)
742                 goto unregister_notifier;
743
744         err = dsa_switch_setup_tag_protocol(ds);
745         if (err)
746                 goto teardown;
747
748         devlink_params_publish(ds->devlink);
749
750         if (!ds->slave_mii_bus && ds->ops->phy_read) {
751                 ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
752                 if (!ds->slave_mii_bus) {
753                         err = -ENOMEM;
754                         goto teardown;
755                 }
756
757                 dsa_slave_mii_bus_init(ds);
758
759                 err = mdiobus_register(ds->slave_mii_bus);
760                 if (err < 0)
761                         goto teardown;
762         }
763
764         ds->setup = true;
765
766         return 0;
767
768 teardown:
769         if (ds->ops->teardown)
770                 ds->ops->teardown(ds);
771 unregister_notifier:
772         dsa_switch_unregister_notifier(ds);
773 unregister_devlink_ports:
774         list_for_each_entry(dp, &ds->dst->ports, list)
775                 if (dp->ds == ds)
776                         dsa_port_devlink_teardown(dp);
777         devlink_unregister(ds->devlink);
778 free_devlink:
779         devlink_free(ds->devlink);
780         ds->devlink = NULL;
781
782         return err;
783 }
784
785 static void dsa_switch_teardown(struct dsa_switch *ds)
786 {
787         struct dsa_port *dp;
788
789         if (!ds->setup)
790                 return;
791
792         if (ds->slave_mii_bus && ds->ops->phy_read)
793                 mdiobus_unregister(ds->slave_mii_bus);
794
795         dsa_switch_unregister_notifier(ds);
796
797         if (ds->ops->teardown)
798                 ds->ops->teardown(ds);
799
800         if (ds->devlink) {
801                 list_for_each_entry(dp, &ds->dst->ports, list)
802                         if (dp->ds == ds)
803                                 dsa_port_devlink_teardown(dp);
804                 devlink_unregister(ds->devlink);
805                 devlink_free(ds->devlink);
806                 ds->devlink = NULL;
807         }
808
809         ds->setup = false;
810 }
811
812 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
813 {
814         struct dsa_port *dp;
815         int err;
816
817         list_for_each_entry(dp, &dst->ports, list) {
818                 err = dsa_switch_setup(dp->ds);
819                 if (err)
820                         goto teardown;
821         }
822
823         list_for_each_entry(dp, &dst->ports, list) {
824                 err = dsa_port_setup(dp);
825                 if (err) {
826                         dsa_port_devlink_teardown(dp);
827                         dp->type = DSA_PORT_TYPE_UNUSED;
828                         err = dsa_port_devlink_setup(dp);
829                         if (err)
830                                 goto teardown;
831                         continue;
832                 }
833         }
834
835         return 0;
836
837 teardown:
838         list_for_each_entry(dp, &dst->ports, list)
839                 dsa_port_teardown(dp);
840
841         list_for_each_entry(dp, &dst->ports, list)
842                 dsa_switch_teardown(dp->ds);
843
844         return err;
845 }
846
847 static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
848 {
849         struct dsa_port *dp;
850
851         list_for_each_entry(dp, &dst->ports, list)
852                 dsa_port_teardown(dp);
853
854         list_for_each_entry(dp, &dst->ports, list)
855                 dsa_switch_teardown(dp->ds);
856 }
857
858 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
859 {
860         struct dsa_port *dp;
861         int err;
862
863         list_for_each_entry(dp, &dst->ports, list) {
864                 if (dsa_port_is_cpu(dp)) {
865                         err = dsa_master_setup(dp->master, dp);
866                         if (err)
867                                 return err;
868                 }
869         }
870
871         return 0;
872 }
873
874 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
875 {
876         struct dsa_port *dp;
877
878         list_for_each_entry(dp, &dst->ports, list)
879                 if (dsa_port_is_cpu(dp))
880                         dsa_master_teardown(dp->master);
881 }
882
883 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
884 {
885         unsigned int len = 0;
886         struct dsa_port *dp;
887
888         list_for_each_entry(dp, &dst->ports, list) {
889                 if (dp->ds->num_lag_ids > len)
890                         len = dp->ds->num_lag_ids;
891         }
892
893         if (!len)
894                 return 0;
895
896         dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
897         if (!dst->lags)
898                 return -ENOMEM;
899
900         dst->lags_len = len;
901         return 0;
902 }
903
904 static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
905 {
906         kfree(dst->lags);
907 }
908
909 static int dsa_tree_setup(struct dsa_switch_tree *dst)
910 {
911         bool complete;
912         int err;
913
914         if (dst->setup) {
915                 pr_err("DSA: tree %d already setup! Disjoint trees?\n",
916                        dst->index);
917                 return -EEXIST;
918         }
919
920         complete = dsa_tree_setup_routing_table(dst);
921         if (!complete)
922                 return 0;
923
924         err = dsa_tree_setup_default_cpu(dst);
925         if (err)
926                 return err;
927
928         err = dsa_tree_setup_switches(dst);
929         if (err)
930                 goto teardown_default_cpu;
931
932         err = dsa_tree_setup_master(dst);
933         if (err)
934                 goto teardown_switches;
935
936         err = dsa_tree_setup_lags(dst);
937         if (err)
938                 goto teardown_master;
939
940         dst->setup = true;
941
942         pr_info("DSA: tree %d setup\n", dst->index);
943
944         return 0;
945
946 teardown_master:
947         dsa_tree_teardown_master(dst);
948 teardown_switches:
949         dsa_tree_teardown_switches(dst);
950 teardown_default_cpu:
951         dsa_tree_teardown_default_cpu(dst);
952
953         return err;
954 }
955
956 static void dsa_tree_teardown(struct dsa_switch_tree *dst)
957 {
958         struct dsa_link *dl, *next;
959
960         if (!dst->setup)
961                 return;
962
963         dsa_tree_teardown_lags(dst);
964
965         dsa_tree_teardown_master(dst);
966
967         dsa_tree_teardown_switches(dst);
968
969         dsa_tree_teardown_default_cpu(dst);
970
971         list_for_each_entry_safe(dl, next, &dst->rtable, list) {
972                 list_del(&dl->list);
973                 kfree(dl);
974         }
975
976         pr_info("DSA: tree %d torn down\n", dst->index);
977
978         dst->setup = false;
979 }
980
981 /* Since the dsa/tagging sysfs device attribute is per master, the assumption
982  * is that all DSA switches within a tree share the same tagger, otherwise
983  * they would have formed disjoint trees (different "dsa,member" values).
984  */
985 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
986                               struct net_device *master,
987                               const struct dsa_device_ops *tag_ops,
988                               const struct dsa_device_ops *old_tag_ops)
989 {
990         struct dsa_notifier_tag_proto_info info;
991         struct dsa_port *dp;
992         int err = -EBUSY;
993
994         if (!rtnl_trylock())
995                 return restart_syscall();
996
997         /* At the moment we don't allow changing the tag protocol under
998          * traffic. The rtnl_mutex also happens to serialize concurrent
999          * attempts to change the tagging protocol. If we ever lift the IFF_UP
1000          * restriction, there needs to be another mutex which serializes this.
1001          */
1002         if (master->flags & IFF_UP)
1003                 goto out_unlock;
1004
1005         list_for_each_entry(dp, &dst->ports, list) {
1006                 if (!dsa_is_user_port(dp->ds, dp->index))
1007                         continue;
1008
1009                 if (dp->slave->flags & IFF_UP)
1010                         goto out_unlock;
1011         }
1012
1013         info.tag_ops = tag_ops;
1014         err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1015         if (err)
1016                 goto out_unwind_tagger;
1017
1018         dst->tag_ops = tag_ops;
1019
1020         rtnl_unlock();
1021
1022         return 0;
1023
1024 out_unwind_tagger:
1025         info.tag_ops = old_tag_ops;
1026         dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1027 out_unlock:
1028         rtnl_unlock();
1029         return err;
1030 }
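/* This is expected to be reached from the master's "dsa/tagging" sysfs
 * attribute (see the comment above). An illustrative invocation from user
 * space, assuming eth0 is the master and lan1 its only user port:
 *
 *	ip link set eth0 down && ip link set lan1 down
 *	echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
 */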
1031
1032 static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1033 {
1034         struct dsa_switch_tree *dst = ds->dst;
1035         struct dsa_port *dp;
1036
1037         list_for_each_entry(dp, &dst->ports, list)
1038                 if (dp->ds == ds && dp->index == index)
1039                         return dp;
1040
1041         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1042         if (!dp)
1043                 return NULL;
1044
1045         dp->ds = ds;
1046         dp->index = index;
1047
1048         INIT_LIST_HEAD(&dp->list);
1049         list_add_tail(&dp->list, &dst->ports);
1050
1051         return dp;
1052 }
1053
1054 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1055 {
1056         if (!name)
1057                 name = "eth%d";
1058
1059         dp->type = DSA_PORT_TYPE_USER;
1060         dp->name = name;
1061
1062         return 0;
1063 }
1064
1065 static int dsa_port_parse_dsa(struct dsa_port *dp)
1066 {
1067         dp->type = DSA_PORT_TYPE_DSA;
1068
1069         return 0;
1070 }
1071
1072 static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
1073                                                   struct net_device *master)
1074 {
1075         enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
1076         struct dsa_switch *mds, *ds = dp->ds;
1077         unsigned int mdp_upstream;
1078         struct dsa_port *mdp;
1079
1080         /* It is possible to stack DSA switches onto one another. When that
1081          * happens, the switch driver may want to know whether its tagging
1082          * protocol is going to work in such a configuration.
1083          */
1084         if (dsa_slave_dev_check(master)) {
1085                 mdp = dsa_slave_to_port(master);
1086                 mds = mdp->ds;
1087                 mdp_upstream = dsa_upstream_port(mds, mdp->index);
1088                 tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
1089                                                           DSA_TAG_PROTO_NONE);
1090         }
1091
1092         /* If the master device is not itself a DSA slave in a disjoint DSA
1093          * tree, then return immediately.
1094          */
1095         return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
1096 }
1097
1098 static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
1099                               const char *user_protocol)
1100 {
1101         struct dsa_switch *ds = dp->ds;
1102         struct dsa_switch_tree *dst = ds->dst;
1103         const struct dsa_device_ops *tag_ops;
1104         enum dsa_tag_protocol default_proto;
1105
1106         /* Find out which protocol the switch would prefer. */
1107         default_proto = dsa_get_tag_protocol(dp, master);
1108         if (dst->default_proto) {
1109                 if (dst->default_proto != default_proto) {
1110                         dev_err(ds->dev,
1111                                 "A DSA switch tree can have only one tagging protocol\n");
1112                         return -EINVAL;
1113                 }
1114         } else {
1115                 dst->default_proto = default_proto;
1116         }
1117
1118         /* See if the user wants to override that preference. */
1119         if (user_protocol) {
1120                 if (!ds->ops->change_tag_protocol) {
1121                         dev_err(ds->dev, "Tag protocol cannot be modified\n");
1122                         return -EINVAL;
1123                 }
1124
1125                 tag_ops = dsa_find_tagger_by_name(user_protocol);
1126         } else {
1127                 tag_ops = dsa_tag_driver_get(default_proto);
1128         }
1129
1130         if (IS_ERR(tag_ops)) {
1131                 if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
1132                         return -EPROBE_DEFER;
1133
1134                 dev_warn(ds->dev, "No tagger for this switch\n");
1135                 return PTR_ERR(tag_ops);
1136         }
1137
1138         if (dst->tag_ops) {
1139                 if (dst->tag_ops != tag_ops) {
1140                         dev_err(ds->dev,
1141                                 "A DSA switch tree can have only one tagging protocol\n");
1142
1143                         dsa_tag_driver_put(tag_ops);
1144                         return -EINVAL;
1145                 }
1146
1147                 /* In the case of multiple CPU ports per switch, the tagging
1148                  * protocol is still reference-counted only per switch tree.
1149                  */
1150                 dsa_tag_driver_put(tag_ops);
1151         } else {
1152                 dst->tag_ops = tag_ops;
1153         }
1154
1155         dp->master = master;
1156         dp->type = DSA_PORT_TYPE_CPU;
1157         dsa_port_set_tag_protocol(dp, dst->tag_ops);
1158         dp->dst = dst;
1159
1160         /* At this point, the tree may be configured to use a different
1161          * tagger than the one chosen by the switch driver during
1162          * .setup, in the case when a user selects a custom protocol
1163          * through the DT.
1164          *
1165          * This is resolved by syncing the driver with the tree in
1166          * dsa_switch_setup_tag_protocol once .setup has run and the
1167          * driver is ready to accept calls to .change_tag_protocol. If
1168          * the driver does not support the custom protocol at that
1169          * point, the tree is wholly rejected, thereby ensuring that the
1170          * tree and driver are always in agreement on the protocol to
1171          * use.
1172          */
1173         return 0;
1174 }
1175
1176 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1177 {
1178         struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1179         const char *name = of_get_property(dn, "label", NULL);
1180         bool link = of_property_read_bool(dn, "link");
1181
1182         dp->dn = dn;
1183
1184         if (ethernet) {
1185                 struct net_device *master;
1186                 const char *user_protocol;
1187
1188                 master = of_find_net_device_by_node(ethernet);
1189                 if (!master)
1190                         return -EPROBE_DEFER;
1191
1192                 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1193                 return dsa_port_parse_cpu(dp, master, user_protocol);
1194         }
1195
1196         if (link)
1197                 return dsa_port_parse_dsa(dp);
1198
1199         return dsa_port_parse_user(dp, name);
1200 }
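/* A device tree sketch of the properties consumed above, for a CPU port and
 * a user port (phandles, labels and the tagger name are illustrative):
 *
 *	port@0 {
 *		reg = <0>;
 *		ethernet = <&eth0>;
 *		dsa-tag-protocol = "ocelot-8021q";	// optional override
 *	};
 *
 *	port@1 {
 *		reg = <1>;
 *		label = "lan1";
 *	};
 */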
1201
1202 static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1203                                      struct device_node *dn)
1204 {
1205         struct device_node *ports, *port;
1206         struct dsa_port *dp;
1207         int err = 0;
1208         u32 reg;
1209
1210         ports = of_get_child_by_name(dn, "ports");
1211         if (!ports) {
1212                 /* The second possibility is "ethernet-ports" */
1213                 ports = of_get_child_by_name(dn, "ethernet-ports");
1214                 if (!ports) {
1215                         dev_err(ds->dev, "no ports child node found\n");
1216                         return -EINVAL;
1217                 }
1218         }
1219
1220         for_each_available_child_of_node(ports, port) {
1221                 err = of_property_read_u32(port, "reg", &reg);
1222                 if (err)
1223                         goto out_put_node;
1224
1225                 if (reg >= ds->num_ports) {
1226                         dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
1227                                 port, reg, ds->num_ports);
1228                         err = -EINVAL;
1229                         goto out_put_node;
1230                 }
1231
1232                 dp = dsa_to_port(ds, reg);
1233
1234                 err = dsa_port_parse_of(dp, port);
1235                 if (err)
1236                         goto out_put_node;
1237         }
1238
1239 out_put_node:
1240         of_node_put(ports);
1241         return err;
1242 }
1243
1244 static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1245                                       struct device_node *dn)
1246 {
1247         u32 m[2] = { 0, 0 };
1248         int sz;
1249
1250         /* Don't error out if this optional property isn't found */
1251         sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1252         if (sz < 0 && sz != -EINVAL)
1253                 return sz;
1254
1255         ds->index = m[1];
1256
1257         ds->dst = dsa_tree_touch(m[0]);
1258         if (!ds->dst)
1259                 return -ENOMEM;
1260
1261         if (dsa_switch_find(ds->dst->index, ds->index)) {
1262                 dev_err(ds->dev,
1263                         "A DSA switch with index %d already exists in tree %d\n",
1264                         ds->index, ds->dst->index);
1265                 return -EEXIST;
1266         }
1267
1268         return 0;
1269 }
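/* Example of the optional property parsed above, placing this switch at
 * index 1 of tree 0:
 *
 *	dsa,member = <0 1>;
 */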
1270
1271 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1272 {
1273         struct dsa_port *dp;
1274         int port;
1275
1276         for (port = 0; port < ds->num_ports; port++) {
1277                 dp = dsa_port_touch(ds, port);
1278                 if (!dp)
1279                         return -ENOMEM;
1280         }
1281
1282         return 0;
1283 }
1284
1285 static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
1286 {
1287         int err;
1288
1289         err = dsa_switch_parse_member_of(ds, dn);
1290         if (err)
1291                 return err;
1292
1293         err = dsa_switch_touch_ports(ds);
1294         if (err)
1295                 return err;
1296
1297         return dsa_switch_parse_ports_of(ds, dn);
1298 }
1299
1300 static int dsa_port_parse(struct dsa_port *dp, const char *name,
1301                           struct device *dev)
1302 {
1303         if (!strcmp(name, "cpu")) {
1304                 struct net_device *master;
1305
1306                 master = dsa_dev_to_net_device(dev);
1307                 if (!master)
1308                         return -EPROBE_DEFER;
1309
1310                 dev_put(master);
1311
1312                 return dsa_port_parse_cpu(dp, master, NULL);
1313         }
1314
1315         if (!strcmp(name, "dsa"))
1316                 return dsa_port_parse_dsa(dp);
1317
1318         return dsa_port_parse_user(dp, name);
1319 }
1320
1321 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1322                                   struct dsa_chip_data *cd)
1323 {
1324         bool valid_name_found = false;
1325         struct dsa_port *dp;
1326         struct device *dev;
1327         const char *name;
1328         unsigned int i;
1329         int err;
1330
1331         for (i = 0; i < DSA_MAX_PORTS; i++) {
1332                 name = cd->port_names[i];
1333                 dev = cd->netdev[i];
1334                 dp = dsa_to_port(ds, i);
1335
1336                 if (!name)
1337                         continue;
1338
1339                 err = dsa_port_parse(dp, name, dev);
1340                 if (err)
1341                         return err;
1342
1343                 valid_name_found = true;
1344         }
1345
1346         if (!valid_name_found && i == DSA_MAX_PORTS)
1347                 return -EINVAL;
1348
1349         return 0;
1350 }
1351
1352 static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1353 {
1354         int err;
1355
1356         ds->cd = cd;
1357
1358         /* We don't support interconnected switches or multiple trees via
1359          * platform data, so this is the only switch of the tree.
1360          */
1361         ds->index = 0;
1362         ds->dst = dsa_tree_touch(0);
1363         if (!ds->dst)
1364                 return -ENOMEM;
1365
1366         err = dsa_switch_touch_ports(ds);
1367         if (err)
1368                 return err;
1369
1370         return dsa_switch_parse_ports(ds, cd);
1371 }
1372
1373 static void dsa_switch_release_ports(struct dsa_switch *ds)
1374 {
1375         struct dsa_switch_tree *dst = ds->dst;
1376         struct dsa_port *dp, *next;
1377
1378         list_for_each_entry_safe(dp, next, &dst->ports, list) {
1379                 if (dp->ds != ds)
1380                         continue;
1381                 list_del(&dp->list);
1382                 kfree(dp);
1383         }
1384 }
1385
1386 static int dsa_switch_probe(struct dsa_switch *ds)
1387 {
1388         struct dsa_switch_tree *dst;
1389         struct dsa_chip_data *pdata;
1390         struct device_node *np;
1391         int err;
1392
1393         if (!ds->dev)
1394                 return -ENODEV;
1395
1396         pdata = ds->dev->platform_data;
1397         np = ds->dev->of_node;
1398
1399         if (!ds->num_ports)
1400                 return -EINVAL;
1401
1402         if (np) {
1403                 err = dsa_switch_parse_of(ds, np);
1404                 if (err)
1405                         dsa_switch_release_ports(ds);
1406         } else if (pdata) {
1407                 err = dsa_switch_parse(ds, pdata);
1408                 if (err)
1409                         dsa_switch_release_ports(ds);
1410         } else {
1411                 err = -ENODEV;
1412         }
1413
1414         if (err)
1415                 return err;
1416
1417         dst = ds->dst;
1418         dsa_tree_get(dst);
1419         err = dsa_tree_setup(dst);
1420         if (err) {
1421                 dsa_switch_release_ports(ds);
1422                 dsa_tree_put(dst);
1423         }
1424
1425         return err;
1426 }
1427
1428 int dsa_register_switch(struct dsa_switch *ds)
1429 {
1430         int err;
1431
1432         mutex_lock(&dsa2_mutex);
1433         err = dsa_switch_probe(ds);
1434         dsa_tree_put(ds->dst);
1435         mutex_unlock(&dsa2_mutex);
1436
1437         return err;
1438 }
1439 EXPORT_SYMBOL_GPL(dsa_register_switch);
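/* A minimal sketch of the expected driver-side usage, with error handling
 * omitted and the MY_NUM_PORTS/my_dsa_switch_ops/priv names illustrative:
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *
 *	ds->dev = dev;
 *	ds->num_ports = MY_NUM_PORTS;
 *	ds->ops = &my_dsa_switch_ops;
 *	ds->priv = priv;
 *
 *	return dsa_register_switch(ds);
 */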
1440
1441 static void dsa_switch_remove(struct dsa_switch *ds)
1442 {
1443         struct dsa_switch_tree *dst = ds->dst;
1444
1445         dsa_tree_teardown(dst);
1446         dsa_switch_release_ports(ds);
1447         dsa_tree_put(dst);
1448 }
1449
1450 void dsa_unregister_switch(struct dsa_switch *ds)
1451 {
1452         mutex_lock(&dsa2_mutex);
1453         dsa_switch_remove(ds);
1454         mutex_unlock(&dsa2_mutex);
1455 }
1456 EXPORT_SYMBOL_GPL(dsa_unregister_switch);