net: dsa: don't fast age bridge ports with learning turned off
net/dsa/port.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *      Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13
14 #include "dsa_priv.h"
15
16 /**
17  * dsa_port_notify - Notify the switching fabric of changes to a port
18  * @dp: port on which change occurred
19  * @e: event, must be of type DSA_NOTIFIER_*
20  * @v: event-specific value.
21  *
22  * Notify all switches in the DSA tree that this port's switch belongs to,
23  * including this switch itself, of an event. Allows the other switches to
24  * reconfigure themselves for cross-chip operations. Can also be used to
25  * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26  * a user port's state changes.
27  */
28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29 {
30         return dsa_tree_notify(dp->ds->dst, e, v);
31 }
32
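/* Flush the dynamically learned FDB entries on this port, if the switch
 * driver implements the operation; otherwise this is a no-op.
 */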
33 static void dsa_port_fast_age(const struct dsa_port *dp)
34 {
35         struct dsa_switch *ds = dp->ds;
36
37         if (!ds->ops->port_fast_age)
38                 return;
39
40         ds->ops->port_fast_age(ds, dp->index);
41 }
42
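/* Program @state into the switch port. When @do_fast_age is true and address
 * learning is enabled on the port, leaving the Learning or Forwarding state
 * for a blocking state also flushes the learned FDB entries; ports with
 * learning turned off (e.g. standalone ports) have nothing to flush.
 */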
43 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
44 {
45         struct dsa_switch *ds = dp->ds;
46         int port = dp->index;
47
48         if (!ds->ops->port_stp_state_set)
49                 return -EOPNOTSUPP;
50
51         ds->ops->port_stp_state_set(ds, port, state);
52
53         if (do_fast_age && dp->learning) {
54                 /* Fast age FDB entries or flush appropriate forwarding database
55                  * for the given port, if we are moving it from the Learning or
56                  * Forwarding state to the Disabled, Blocking or Listening state.
57                  * Ports that were standalone before the STP state change don't
58                  * need to fast age the FDB, since address learning is off in
59                  * standalone mode.
60                  */
61
62                 if ((dp->stp_state == BR_STATE_LEARNING ||
63                      dp->stp_state == BR_STATE_FORWARDING) &&
64                     (state == BR_STATE_DISABLED ||
65                      state == BR_STATE_BLOCKING ||
66                      state == BR_STATE_LISTENING))
67                         dsa_port_fast_age(dp);
68         }
69
70         dp->stp_state = state;
71
72         return 0;
73 }
74
75 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
76                                    bool do_fast_age)
77 {
78         int err;
79
80         err = dsa_port_set_state(dp, state, do_fast_age);
81         if (err)
82                 pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
83 }
84
85 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
86 {
87         struct dsa_switch *ds = dp->ds;
88         int port = dp->index;
89         int err;
90
91         if (ds->ops->port_enable) {
92                 err = ds->ops->port_enable(ds, port, phy);
93                 if (err)
94                         return err;
95         }
96
97         if (!dp->bridge_dev)
98                 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
99
100         if (dp->pl)
101                 phylink_start(dp->pl);
102
103         return 0;
104 }
105
106 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
107 {
108         int err;
109
110         rtnl_lock();
111         err = dsa_port_enable_rt(dp, phy);
112         rtnl_unlock();
113
114         return err;
115 }
116
117 void dsa_port_disable_rt(struct dsa_port *dp)
118 {
119         struct dsa_switch *ds = dp->ds;
120         int port = dp->index;
121
122         if (dp->pl)
123                 phylink_stop(dp->pl);
124
125         if (!dp->bridge_dev)
126                 dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
127
128         if (ds->ops->port_disable)
129                 ds->ops->port_disable(ds, port);
130 }
131
132 void dsa_port_disable(struct dsa_port *dp)
133 {
134         rtnl_lock();
135         dsa_port_disable_rt(dp);
136         rtnl_unlock();
137 }
138
139 static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
140                                          struct netlink_ext_ack *extack)
141 {
142         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
143                                    BR_BCAST_FLOOD;
144         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
145         int flag, err;
146
147         for_each_set_bit(flag, &mask, 32) {
148                 struct switchdev_brport_flags flags = {0};
149
150                 flags.mask = BIT(flag);
151
152                 if (br_port_flag_is_set(brport_dev, BIT(flag)))
153                         flags.val = BIT(flag);
154
155                 err = dsa_port_bridge_flags(dp, flags, extack);
156                 if (err && err != -EOPNOTSUPP)
157                         return err;
158         }
159
160         return 0;
161 }
162
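/* Restore the standalone defaults for the bridge port flags: address learning
 * off, unknown unicast, multicast and broadcast flooding on.
 */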
163 static void dsa_port_clear_brport_flags(struct dsa_port *dp)
164 {
165         const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
166         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
167                                    BR_BCAST_FLOOD;
168         int flag, err;
169
170         for_each_set_bit(flag, &mask, 32) {
171                 struct switchdev_brport_flags flags = {0};
172
173                 flags.mask = BIT(flag);
174                 flags.val = val & BIT(flag);
175
176                 err = dsa_port_bridge_flags(dp, flags, NULL);
177                 if (err && err != -EOPNOTSUPP)
178                         dev_err(dp->ds->dev,
179                                 "failed to clear bridge port flag %lu: %pe\n",
180                                 flags.val, ERR_PTR(err));
181         }
182 }
183
184 static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
185                                          struct netlink_ext_ack *extack)
186 {
187         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
188         struct net_device *br = dp->bridge_dev;
189         int err;
190
191         err = dsa_port_inherit_brport_flags(dp, extack);
192         if (err)
193                 return err;
194
195         err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
196         if (err && err != -EOPNOTSUPP)
197                 return err;
198
199         err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
200         if (err && err != -EOPNOTSUPP)
201                 return err;
202
203         err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
204         if (err && err != -EOPNOTSUPP)
205                 return err;
206
207         return 0;
208 }
209
210 static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
211 {
212         /* Configure the port for standalone mode (no address learning,
213          * flood everything).
214          * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
215          * when the user requests it through netlink or sysfs, but not
216          * automatically at port join or leave, so we need to handle resetting
217          * the brport flags ourselves. We actually prefer it that way, because
218          * otherwise some setups might never get the notification they need:
219          * for example, when a port leaves a LAG that offloads the bridge, it
220          * becomes standalone, but as far as the bridge is concerned, no
221          * port ever left.
222          */
223         dsa_port_clear_brport_flags(dp);
224
225         /* Port left the bridge and was put in BR_STATE_DISABLED by the bridge
226          * layer, so move it back to BR_STATE_FORWARDING to keep it functional.
227          */
228         dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
229
230         /* VLAN filtering is handled by dsa_switch_bridge_leave */
231
232         /* Ageing time may be global to the switch chip, so don't change it
233          * here because we have no good reason (or value) to change it to.
234          */
235 }
236
237 static int dsa_tree_find_bridge_num(struct dsa_switch_tree *dst,
238                                     struct net_device *bridge_dev)
239 {
240         struct dsa_port *dp;
241
242         /* When preparing the offload for a port, it will have a valid
243          * dp->bridge_dev pointer but not yet a valid dp->bridge_num.
244          * However, there might be other ports with the same dp->bridge_dev
245          * and a valid dp->bridge_num, so just ignore this port.
246          */
247         list_for_each_entry(dp, &dst->ports, list)
248                 if (dp->bridge_dev == bridge_dev && dp->bridge_num != -1)
249                         return dp->bridge_num;
250
251         return -1;
252 }
253
254 static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
255                                              struct net_device *bridge_dev)
256 {
257         struct dsa_switch_tree *dst = dp->ds->dst;
258         int bridge_num = dp->bridge_num;
259         struct dsa_switch *ds = dp->ds;
260
261         /* No bridge TX forwarding offload => do nothing */
262         if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
263                 return;
264
265         dp->bridge_num = -1;
266
267         /* Check if the bridge is still in use, otherwise it is time
268          * to clean it up so we can reuse this bridge_num later.
269          */
270         if (dsa_tree_find_bridge_num(dst, bridge_dev) < 0)
271                 clear_bit(bridge_num, &dst->fwd_offloading_bridges);
272
273         /* Notify the chips only once the offload has been deactivated, so
274          * that they can update their configuration accordingly.
275          */
276         ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
277                                               bridge_num);
278 }
279
280 static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
281                                            struct net_device *bridge_dev)
282 {
283         struct dsa_switch_tree *dst = dp->ds->dst;
284         struct dsa_switch *ds = dp->ds;
285         int bridge_num, err;
286
287         if (!ds->ops->port_bridge_tx_fwd_offload)
288                 return false;
289
290         bridge_num = dsa_tree_find_bridge_num(dst, bridge_dev);
291         if (bridge_num < 0) {
292                 /* First port that offloads TX forwarding for this bridge */
293                 bridge_num = find_first_zero_bit(&dst->fwd_offloading_bridges,
294                                                  DSA_MAX_NUM_OFFLOADING_BRIDGES);
295                 if (bridge_num >= ds->num_fwd_offloading_bridges)
296                         return false;
297
298                 set_bit(bridge_num, &dst->fwd_offloading_bridges);
299         }
300
301         dp->bridge_num = bridge_num;
302
303         /* Notify the driver */
304         err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
305                                                   bridge_num);
306         if (err) {
307                 dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
308                 return false;
309         }
310
311         return true;
312 }
313
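/* Called when the port becomes a lower of a bridge, possibly through a LAG.
 * Notify the whole fabric of the join, try to enable TX forwarding offload,
 * offload the bridge port through switchdev and sync the bridge attributes
 * to hardware, rolling everything back on failure.
 */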
314 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
315                          struct netlink_ext_ack *extack)
316 {
317         struct dsa_notifier_bridge_info info = {
318                 .tree_index = dp->ds->dst->index,
319                 .sw_index = dp->ds->index,
320                 .port = dp->index,
321                 .br = br,
322         };
323         struct net_device *dev = dp->slave;
324         struct net_device *brport_dev;
325         bool tx_fwd_offload;
326         int err;
327
328         /* Here the interface is already bridged. Reflect the current
329          * configuration so that drivers can program their chips accordingly.
330          */
331         dp->bridge_dev = br;
332
333         brport_dev = dsa_port_to_bridge_port(dp);
334
335         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
336         if (err)
337                 goto out_rollback;
338
339         tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);
340
341         err = switchdev_bridge_port_offload(brport_dev, dev, dp,
342                                             &dsa_slave_switchdev_notifier,
343                                             &dsa_slave_switchdev_blocking_notifier,
344                                             tx_fwd_offload, extack);
345         if (err)
346                 goto out_rollback_unbridge;
347
348         err = dsa_port_switchdev_sync_attrs(dp, extack);
349         if (err)
350                 goto out_rollback_unoffload;
351
352         return 0;
353
354 out_rollback_unoffload:
355         switchdev_bridge_port_unoffload(brport_dev, dp,
356                                         &dsa_slave_switchdev_notifier,
357                                         &dsa_slave_switchdev_blocking_notifier);
358 out_rollback_unbridge:
359         dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
360 out_rollback:
361         dp->bridge_dev = NULL;
362         return err;
363 }
364
365 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
366 {
367         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
368
369         switchdev_bridge_port_unoffload(brport_dev, dp,
370                                         &dsa_slave_switchdev_notifier,
371                                         &dsa_slave_switchdev_blocking_notifier);
372 }
373
374 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
375 {
376         struct dsa_notifier_bridge_info info = {
377                 .tree_index = dp->ds->dst->index,
378                 .sw_index = dp->ds->index,
379                 .port = dp->index,
380                 .br = br,
381         };
382         int err;
383
384         /* Here the port is already unbridged. Reflect the current configuration
385          * so that drivers can program their chips accordingly.
386          */
387         dp->bridge_dev = NULL;
388
389         dsa_port_bridge_tx_fwd_unoffload(dp, br);
390
391         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
392         if (err)
393                 pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
394
395         dsa_port_switchdev_unsync_attrs(dp);
396 }
397
398 int dsa_port_lag_change(struct dsa_port *dp,
399                         struct netdev_lag_lower_state_info *linfo)
400 {
401         struct dsa_notifier_lag_info info = {
402                 .sw_index = dp->ds->index,
403                 .port = dp->index,
404         };
405         bool tx_enabled;
406
407         if (!dp->lag_dev)
408                 return 0;
409
410         /* On statically configured aggregates (e.g. loadbalance
411          * without LACP) ports will always be tx_enabled, even if the
412          * link is down. Thus we require both link_up and tx_enabled
413          * in order to include a port in the tx set.
414          */
415         tx_enabled = linfo->link_up && linfo->tx_enabled;
416
417         if (tx_enabled == dp->lag_tx_enabled)
418                 return 0;
419
420         dp->lag_tx_enabled = tx_enabled;
421
422         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
423 }
424
425 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
426                       struct netdev_lag_upper_info *uinfo,
427                       struct netlink_ext_ack *extack)
428 {
429         struct dsa_notifier_lag_info info = {
430                 .sw_index = dp->ds->index,
431                 .port = dp->index,
432                 .lag = lag,
433                 .info = uinfo,
434         };
435         struct net_device *bridge_dev;
436         int err;
437
438         dsa_lag_map(dp->ds->dst, lag);
439         dp->lag_dev = lag;
440
441         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
442         if (err)
443                 goto err_lag_join;
444
445         bridge_dev = netdev_master_upper_dev_get(lag);
446         if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
447                 return 0;
448
449         err = dsa_port_bridge_join(dp, bridge_dev, extack);
450         if (err)
451                 goto err_bridge_join;
452
453         return 0;
454
455 err_bridge_join:
456         dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
457 err_lag_join:
458         dp->lag_dev = NULL;
459         dsa_lag_unmap(dp->ds->dst, lag);
460         return err;
461 }
462
463 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
464 {
465         if (dp->bridge_dev)
466                 dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
467 }
468
469 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
470 {
471         struct dsa_notifier_lag_info info = {
472                 .sw_index = dp->ds->index,
473                 .port = dp->index,
474                 .lag = lag,
475         };
476         int err;
477
478         if (!dp->lag_dev)
479                 return;
480
481         /* Port might have been part of a LAG that in turn was
482          * attached to a bridge.
483          */
484         if (dp->bridge_dev)
485                 dsa_port_bridge_leave(dp, dp->bridge_dev);
486
487         dp->lag_tx_enabled = false;
488         dp->lag_dev = NULL;
489
490         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
491         if (err)
492                 pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
493                        err);
494
495         dsa_lag_unmap(dp->ds->dst, lag);
496 }
497
498 /* Must be called under rcu_read_lock() */
499 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
500                                               bool vlan_filtering,
501                                               struct netlink_ext_ack *extack)
502 {
503         struct dsa_switch *ds = dp->ds;
504         int err, i;
505
506         /* VLAN awareness was off, so the question is "can we turn it on".
507          * We may have had 8021q uppers; those need to go. Make sure we don't
508          * enter an inconsistent state: deny changing the VLAN awareness state
509          * as long as we have 8021q uppers.
510          */
511         if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
512                 struct net_device *upper_dev, *slave = dp->slave;
513                 struct net_device *br = dp->bridge_dev;
514                 struct list_head *iter;
515
516                 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
517                         struct bridge_vlan_info br_info;
518                         u16 vid;
519
520                         if (!is_vlan_dev(upper_dev))
521                                 continue;
522
523                         vid = vlan_dev_vlan_id(upper_dev);
524
525                         /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
526                          * device or the VID, respectively, is not found; returning
527                          * 0 means success, which is a failure for us here.
528                          */
529                         err = br_vlan_get_info(br, vid, &br_info);
530                         if (err == 0) {
531                                 NL_SET_ERR_MSG_MOD(extack,
532                                                    "Must first remove VLAN uppers having VIDs also present in bridge");
533                                 return false;
534                         }
535                 }
536         }
537
538         if (!ds->vlan_filtering_is_global)
539                 return true;
540
541         /* For cases where enabling/disabling VLAN awareness is global to the
542          * switch, we need to handle the case where multiple bridges span
543          * different ports of the same switch device and one of them has a
544          * different setting than what is being requested.
545          */
546         for (i = 0; i < ds->num_ports; i++) {
547                 struct net_device *other_bridge;
548
549                 other_bridge = dsa_to_port(ds, i)->bridge_dev;
550                 if (!other_bridge)
551                         continue;
552                 /* If it's the same bridge, it also has the same
553                  * vlan_filtering setting => no need to check
554                  */
555                 if (other_bridge == dp->bridge_dev)
556                         continue;
557                 if (br_vlan_enabled(other_bridge) != vlan_filtering) {
558                         NL_SET_ERR_MSG_MOD(extack,
559                                            "VLAN filtering is a global setting");
560                         return false;
561                 }
562         }
563         return true;
564 }
565
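/* Change the VLAN awareness of a port. The request is first validated under
 * RCU (it may be denied due to conflicting 8021q uppers, or because VLAN
 * filtering is global to the switch and other bridges disagree), then
 * programmed into the driver and recorded per switch or per port.
 */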
566 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
567                             struct netlink_ext_ack *extack)
568 {
569         struct dsa_switch *ds = dp->ds;
570         bool apply;
571         int err;
572
573         if (!ds->ops->port_vlan_filtering)
574                 return -EOPNOTSUPP;
575
576         /* We are called from dsa_slave_switchdev_blocking_event(),
577          * which is not under rcu_read_lock(), unlike
578          * dsa_slave_switchdev_event().
579          */
580         rcu_read_lock();
581         apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
582         rcu_read_unlock();
583         if (!apply)
584                 return -EINVAL;
585
586         if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
587                 return 0;
588
589         err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
590                                            extack);
591         if (err)
592                 return err;
593
594         if (ds->vlan_filtering_is_global)
595                 ds->vlan_filtering = vlan_filtering;
596         else
597                 dp->vlan_filtering = vlan_filtering;
598
599         return 0;
600 }
601
602 /* This enforces legacy behavior for switch drivers which assume they can't
603  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
604  */
605 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
606 {
607         struct dsa_switch *ds = dp->ds;
608
609         if (!dp->bridge_dev)
610                 return false;
611
612         return (!ds->configure_vlan_while_not_filtering &&
613                 !br_vlan_enabled(dp->bridge_dev));
614 }
615
616 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
617 {
618         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
619         unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
620         struct dsa_notifier_ageing_time_info info;
621         int err;
622
623         info.ageing_time = ageing_time;
624
625         err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
626         if (err)
627                 return err;
628
629         dp->ageing_time = ageing_time;
630
631         return 0;
632 }
633
634 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
635                               struct switchdev_brport_flags flags,
636                               struct netlink_ext_ack *extack)
637 {
638         struct dsa_switch *ds = dp->ds;
639
640         if (!ds->ops->port_pre_bridge_flags)
641                 return -EINVAL;
642
643         return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
644 }
645
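/* Offload a change of bridge port flags into the switch driver. When address
 * learning goes from enabled to disabled, also fast age the port so that
 * previously learned FDB entries do not linger around.
 */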
646 int dsa_port_bridge_flags(struct dsa_port *dp,
647                           struct switchdev_brport_flags flags,
648                           struct netlink_ext_ack *extack)
649 {
650         struct dsa_switch *ds = dp->ds;
651         int err;
652
653         if (!ds->ops->port_bridge_flags)
654                 return -EOPNOTSUPP;
655
656         err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
657         if (err)
658                 return err;
659
660         if (flags.mask & BR_LEARNING) {
661                 bool learning = flags.val & BR_LEARNING;
662
663                 if (learning == dp->learning)
664                         return 0;
665
666                 if (dp->learning && !learning)
667                         dsa_port_fast_age(dp);
668
669                 dp->learning = learning;
670         }
671
672         return 0;
673 }
674
675 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
676                         bool targeted_match)
677 {
678         struct dsa_notifier_mtu_info info = {
679                 .sw_index = dp->ds->index,
680                 .targeted_match = targeted_match,
681                 .port = dp->index,
682                 .mtu = new_mtu,
683         };
684
685         return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
686 }
687
688 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
689                      u16 vid)
690 {
691         struct dsa_notifier_fdb_info info = {
692                 .sw_index = dp->ds->index,
693                 .port = dp->index,
694                 .addr = addr,
695                 .vid = vid,
696         };
697
698         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
699 }
700
701 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
702                      u16 vid)
703 {
704         struct dsa_notifier_fdb_info info = {
705                 .sw_index = dp->ds->index,
706                 .port = dp->index,
707                 .addr = addr,
708                 .vid = vid,
710         };
711
712         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
713 }
714
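/* Host FDB entries are terminated on the CPU port: add the address to the
 * DSA master's unicast filter, then notify the fabric of the host FDB entry.
 */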
715 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
716                           u16 vid)
717 {
718         struct dsa_notifier_fdb_info info = {
719                 .sw_index = dp->ds->index,
720                 .port = dp->index,
721                 .addr = addr,
722                 .vid = vid,
723         };
724         struct dsa_port *cpu_dp = dp->cpu_dp;
725         int err;
726
727         err = dev_uc_add(cpu_dp->master, addr);
728         if (err)
729                 return err;
730
731         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
732 }
733
734 int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
735                           u16 vid)
736 {
737         struct dsa_notifier_fdb_info info = {
738                 .sw_index = dp->ds->index,
739                 .port = dp->index,
740                 .addr = addr,
741                 .vid = vid,
742         };
743         struct dsa_port *cpu_dp = dp->cpu_dp;
744         int err;
745
746         err = dev_uc_del(cpu_dp->master, addr);
747         if (err)
748                 return err;
749
750         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
751 }
752
753 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
754 {
755         struct dsa_switch *ds = dp->ds;
756         int port = dp->index;
757
758         if (!ds->ops->port_fdb_dump)
759                 return -EOPNOTSUPP;
760
761         return ds->ops->port_fdb_dump(ds, port, cb, data);
762 }
763
764 int dsa_port_mdb_add(const struct dsa_port *dp,
765                      const struct switchdev_obj_port_mdb *mdb)
766 {
767         struct dsa_notifier_mdb_info info = {
768                 .sw_index = dp->ds->index,
769                 .port = dp->index,
770                 .mdb = mdb,
771         };
772
773         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
774 }
775
776 int dsa_port_mdb_del(const struct dsa_port *dp,
777                      const struct switchdev_obj_port_mdb *mdb)
778 {
779         struct dsa_notifier_mdb_info info = {
780                 .sw_index = dp->ds->index,
781                 .port = dp->index,
782                 .mdb = mdb,
783         };
784
785         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
786 }
787
788 int dsa_port_host_mdb_add(const struct dsa_port *dp,
789                           const struct switchdev_obj_port_mdb *mdb)
790 {
791         struct dsa_notifier_mdb_info info = {
792                 .sw_index = dp->ds->index,
793                 .port = dp->index,
794                 .mdb = mdb,
795         };
796         struct dsa_port *cpu_dp = dp->cpu_dp;
797         int err;
798
799         err = dev_mc_add(cpu_dp->master, mdb->addr);
800         if (err)
801                 return err;
802
803         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
804 }
805
806 int dsa_port_host_mdb_del(const struct dsa_port *dp,
807                           const struct switchdev_obj_port_mdb *mdb)
808 {
809         struct dsa_notifier_mdb_info info = {
810                 .sw_index = dp->ds->index,
811                 .port = dp->index,
812                 .mdb = mdb,
813         };
814         struct dsa_port *cpu_dp = dp->cpu_dp;
815         int err;
816
817         err = dev_mc_del(cpu_dp->master, mdb->addr);
818         if (err)
819                 return err;
820
821         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
822 }
823
824 int dsa_port_vlan_add(struct dsa_port *dp,
825                       const struct switchdev_obj_port_vlan *vlan,
826                       struct netlink_ext_ack *extack)
827 {
828         struct dsa_notifier_vlan_info info = {
829                 .sw_index = dp->ds->index,
830                 .port = dp->index,
831                 .vlan = vlan,
832                 .extack = extack,
833         };
834
835         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
836 }
837
838 int dsa_port_vlan_del(struct dsa_port *dp,
839                       const struct switchdev_obj_port_vlan *vlan)
840 {
841         struct dsa_notifier_vlan_info info = {
842                 .sw_index = dp->ds->index,
843                 .port = dp->index,
844                 .vlan = vlan,
845         };
846
847         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
848 }
849
850 int dsa_port_mrp_add(const struct dsa_port *dp,
851                      const struct switchdev_obj_mrp *mrp)
852 {
853         struct dsa_notifier_mrp_info info = {
854                 .sw_index = dp->ds->index,
855                 .port = dp->index,
856                 .mrp = mrp,
857         };
858
859         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
860 }
861
862 int dsa_port_mrp_del(const struct dsa_port *dp,
863                      const struct switchdev_obj_mrp *mrp)
864 {
865         struct dsa_notifier_mrp_info info = {
866                 .sw_index = dp->ds->index,
867                 .port = dp->index,
868                 .mrp = mrp,
869         };
870
871         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
872 }
873
874 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
875                                const struct switchdev_obj_ring_role_mrp *mrp)
876 {
877         struct dsa_notifier_mrp_ring_role_info info = {
878                 .sw_index = dp->ds->index,
879                 .port = dp->index,
880                 .mrp = mrp,
881         };
882
883         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
884 }
885
886 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
887                                const struct switchdev_obj_ring_role_mrp *mrp)
888 {
889         struct dsa_notifier_mrp_ring_role_info info = {
890                 .sw_index = dp->ds->index,
891                 .port = dp->index,
892                 .mrp = mrp,
893         };
894
895         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
896 }
897
898 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
899                                const struct dsa_device_ops *tag_ops)
900 {
901         cpu_dp->rcv = tag_ops->rcv;
902         cpu_dp->tag_ops = tag_ops;
903 }
904
905 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
906 {
907         struct device_node *phy_dn;
908         struct phy_device *phydev;
909
910         phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
911         if (!phy_dn)
912                 return NULL;
913
914         phydev = of_phy_find_device(phy_dn);
915         if (!phydev) {
916                 of_node_put(phy_dn);
917                 return ERR_PTR(-EPROBE_DEFER);
918         }
919
920         of_node_put(phy_dn);
921         return phydev;
922 }
923
924 static void dsa_port_phylink_validate(struct phylink_config *config,
925                                       unsigned long *supported,
926                                       struct phylink_link_state *state)
927 {
928         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
929         struct dsa_switch *ds = dp->ds;
930
931         if (!ds->ops->phylink_validate)
932                 return;
933
934         ds->ops->phylink_validate(ds, dp->index, supported, state);
935 }
936
937 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
938                                                struct phylink_link_state *state)
939 {
940         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
941         struct dsa_switch *ds = dp->ds;
942         int err;
943
944         /* Only called for inband modes */
945         if (!ds->ops->phylink_mac_link_state) {
946                 state->link = 0;
947                 return;
948         }
949
950         err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
951         if (err < 0) {
952                 dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
953                         dp->index, err);
954                 state->link = 0;
955         }
956 }
957
958 static void dsa_port_phylink_mac_config(struct phylink_config *config,
959                                         unsigned int mode,
960                                         const struct phylink_link_state *state)
961 {
962         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
963         struct dsa_switch *ds = dp->ds;
964
965         if (!ds->ops->phylink_mac_config)
966                 return;
967
968         ds->ops->phylink_mac_config(ds, dp->index, mode, state);
969 }
970
971 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
972 {
973         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
974         struct dsa_switch *ds = dp->ds;
975
976         if (!ds->ops->phylink_mac_an_restart)
977                 return;
978
979         ds->ops->phylink_mac_an_restart(ds, dp->index);
980 }
981
982 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
983                                            unsigned int mode,
984                                            phy_interface_t interface)
985 {
986         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
987         struct phy_device *phydev = NULL;
988         struct dsa_switch *ds = dp->ds;
989
990         if (dsa_is_user_port(ds, dp->index))
991                 phydev = dp->slave->phydev;
992
993         if (!ds->ops->phylink_mac_link_down) {
994                 if (ds->ops->adjust_link && phydev)
995                         ds->ops->adjust_link(ds, dp->index, phydev);
996                 return;
997         }
998
999         ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1000 }
1001
1002 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
1003                                          struct phy_device *phydev,
1004                                          unsigned int mode,
1005                                          phy_interface_t interface,
1006                                          int speed, int duplex,
1007                                          bool tx_pause, bool rx_pause)
1008 {
1009         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1010         struct dsa_switch *ds = dp->ds;
1011
1012         if (!ds->ops->phylink_mac_link_up) {
1013                 if (ds->ops->adjust_link && phydev)
1014                         ds->ops->adjust_link(ds, dp->index, phydev);
1015                 return;
1016         }
1017
1018         ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
1019                                      speed, duplex, tx_pause, rx_pause);
1020 }
1021
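/* phylink MAC operations for DSA ports: each callback dispatches to the
 * corresponding dsa_switch_ops hook, with the link_up/link_down handlers
 * falling back to the legacy ->adjust_link callback where available.
 */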
1022 const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
1023         .validate = dsa_port_phylink_validate,
1024         .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
1025         .mac_config = dsa_port_phylink_mac_config,
1026         .mac_an_restart = dsa_port_phylink_mac_an_restart,
1027         .mac_link_down = dsa_port_phylink_mac_link_down,
1028         .mac_link_up = dsa_port_phylink_mac_link_up,
1029 };
1030
1031 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
1032 {
1033         struct dsa_switch *ds = dp->ds;
1034         struct phy_device *phydev;
1035         int port = dp->index;
1036         int err = 0;
1037
1038         phydev = dsa_port_get_phy_device(dp);
1039         if (!phydev)
1040                 return 0;
1041
1042         if (IS_ERR(phydev))
1043                 return PTR_ERR(phydev);
1044
1045         if (enable) {
1046                 err = genphy_resume(phydev);
1047                 if (err < 0)
1048                         goto err_put_dev;
1049
1050                 err = genphy_read_status(phydev);
1051                 if (err < 0)
1052                         goto err_put_dev;
1053         } else {
1054                 err = genphy_suspend(phydev);
1055                 if (err < 0)
1056                         goto err_put_dev;
1057         }
1058
1059         if (ds->ops->adjust_link)
1060                 ds->ops->adjust_link(ds, port, phydev);
1061
1062         dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
1063
1064 err_put_dev:
1065         put_device(&phydev->mdio.dev);
1066         return err;
1067 }
1068
1069 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1070 {
1071         struct device_node *dn = dp->dn;
1072         struct dsa_switch *ds = dp->ds;
1073         struct phy_device *phydev;
1074         int port = dp->index;
1075         phy_interface_t mode;
1076         int err;
1077
1078         err = of_phy_register_fixed_link(dn);
1079         if (err) {
1080                 dev_err(ds->dev,
1081                         "failed to register the fixed PHY of port %d\n",
1082                         port);
1083                 return err;
1084         }
1085
1086         phydev = of_phy_find_device(dn);
1087
1088         err = of_get_phy_mode(dn, &mode);
1089         if (err)
1090                 mode = PHY_INTERFACE_MODE_NA;
1091         phydev->interface = mode;
1092
1093         genphy_read_status(phydev);
1094
1095         if (ds->ops->adjust_link)
1096                 ds->ops->adjust_link(ds, port, phydev);
1097
1098         put_device(&phydev->mdio.dev);
1099
1100         return 0;
1101 }
1102
1103 static int dsa_port_phylink_register(struct dsa_port *dp)
1104 {
1105         struct dsa_switch *ds = dp->ds;
1106         struct device_node *port_dn = dp->dn;
1107         phy_interface_t mode;
1108         int err;
1109
1110         err = of_get_phy_mode(port_dn, &mode);
1111         if (err)
1112                 mode = PHY_INTERFACE_MODE_NA;
1113
1114         dp->pl_config.dev = ds->dev;
1115         dp->pl_config.type = PHYLINK_DEV;
1116         dp->pl_config.pcs_poll = ds->pcs_poll;
1117
1118         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
1119                                 mode, &dsa_port_phylink_mac_ops);
1120         if (IS_ERR(dp->pl)) {
1121                 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1122                 return PTR_ERR(dp->pl);
1123         }
1124
1125         err = phylink_of_phy_connect(dp->pl, port_dn, 0);
1126         if (err && err != -ENODEV) {
1127                 pr_err("could not attach to PHY: %d\n", err);
1128                 goto err_phy_connect;
1129         }
1130
1131         return 0;
1132
1133 err_phy_connect:
1134         phylink_destroy(dp->pl);
1135         return err;
1136 }
1137
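/* Set up the link of a port described in the device tree. Drivers that still
 * implement the legacy ->adjust_link callback get the PHYLIB-based setup;
 * everything else goes through phylink, if the port has a fixed link or a
 * phy-handle at all.
 */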
1138 int dsa_port_link_register_of(struct dsa_port *dp)
1139 {
1140         struct dsa_switch *ds = dp->ds;
1141         struct device_node *phy_np;
1142         int port = dp->index;
1143
1144         if (!ds->ops->adjust_link) {
1145                 phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1146                 if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1147                         if (ds->ops->phylink_mac_link_down)
1148                                 ds->ops->phylink_mac_link_down(ds, port,
1149                                         MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1150                         return dsa_port_phylink_register(dp);
1151                 }
1152                 return 0;
1153         }
1154
1155         dev_warn(ds->dev,
1156                  "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1157
1158         if (of_phy_is_fixed_link(dp->dn))
1159                 return dsa_port_fixed_link_register_of(dp);
1160         else
1161                 return dsa_port_setup_phy_of(dp, true);
1162 }
1163
1164 void dsa_port_link_unregister_of(struct dsa_port *dp)
1165 {
1166         struct dsa_switch *ds = dp->ds;
1167
1168         if (!ds->ops->adjust_link && dp->pl) {
1169                 rtnl_lock();
1170                 phylink_disconnect_phy(dp->pl);
1171                 rtnl_unlock();
1172                 phylink_destroy(dp->pl);
1173                 dp->pl = NULL;
1174                 return;
1175         }
1176
1177         if (of_phy_is_fixed_link(dp->dn))
1178                 of_phy_deregister_fixed_link(dp->dn);
1179         else
1180                 dsa_port_setup_phy_of(dp, false);
1181 }
1182
1183 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
1184 {
1185         struct phy_device *phydev;
1186         int ret = -EOPNOTSUPP;
1187
1188         if (of_phy_is_fixed_link(dp->dn))
1189                 return ret;
1190
1191         phydev = dsa_port_get_phy_device(dp);
1192         if (IS_ERR_OR_NULL(phydev))
1193                 return ret;
1194
1195         ret = phy_ethtool_get_strings(phydev, data);
1196         put_device(&phydev->mdio.dev);
1197
1198         return ret;
1199 }
1200 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
1201
1202 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
1203 {
1204         struct phy_device *phydev;
1205         int ret = -EOPNOTSUPP;
1206
1207         if (of_phy_is_fixed_link(dp->dn))
1208                 return ret;
1209
1210         phydev = dsa_port_get_phy_device(dp);
1211         if (IS_ERR_OR_NULL(phydev))
1212                 return ret;
1213
1214         ret = phy_ethtool_get_stats(phydev, NULL, data);
1215         put_device(&phydev->mdio.dev);
1216
1217         return ret;
1218 }
1219 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
1220
1221 int dsa_port_get_phy_sset_count(struct dsa_port *dp)
1222 {
1223         struct phy_device *phydev;
1224         int ret = -EOPNOTSUPP;
1225
1226         if (of_phy_is_fixed_link(dp->dn))
1227                 return ret;
1228
1229         phydev = dsa_port_get_phy_device(dp);
1230         if (IS_ERR_OR_NULL(phydev))
1231                 return ret;
1232
1233         ret = phy_ethtool_get_sset_count(phydev);
1234         put_device(&phydev->mdio.dev);
1235
1236         return ret;
1237 }
1238 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
1239
1240 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1241 {
1242         struct dsa_notifier_hsr_info info = {
1243                 .sw_index = dp->ds->index,
1244                 .port = dp->index,
1245                 .hsr = hsr,
1246         };
1247         int err;
1248
1249         dp->hsr_dev = hsr;
1250
1251         err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
1252         if (err)
1253                 dp->hsr_dev = NULL;
1254
1255         return err;
1256 }
1257
1258 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1259 {
1260         struct dsa_notifier_hsr_info info = {
1261                 .sw_index = dp->ds->index,
1262                 .port = dp->index,
1263                 .hsr = hsr,
1264         };
1265         int err;
1266
1267         dp->hsr_dev = NULL;
1268
1269         err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
1270         if (err)
1271                 pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
1272 }
1273
1274 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid)
1275 {
1276         struct dsa_notifier_tag_8021q_vlan_info info = {
1277                 .tree_index = dp->ds->dst->index,
1278                 .sw_index = dp->ds->index,
1279                 .port = dp->index,
1280                 .vid = vid,
1281         };
1282
1283         return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1284 }
1285
1286 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
1287 {
1288         struct dsa_notifier_tag_8021q_vlan_info info = {
1289                 .tree_index = dp->ds->dst->index,
1290                 .sw_index = dp->ds->index,
1291                 .port = dp->index,
1292                 .vid = vid,
1293         };
1294         int err;
1295
1296         err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1297         if (err)
1298                 pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n",
1299                        ERR_PTR(err));
1300 }