// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		/* flush all VLANs */
		.vid = 0,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	dsa_port_notify_bridge_fdb_flush(dp);
}

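/**
 * dsa_port_set_state - Set the STP state of a DSA port
 * @dp: port to configure
 * @state: new bridge port state (BR_STATE_*)
 * @do_fast_age: flush the dynamic FDB entries of the port when it leaves the
 *		 Learning/Forwarding states and address learning was enabled
 *
 * Returns 0 on success, or -EOPNOTSUPP if the driver does not implement
 * the port_stp_state_set operation.
 */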
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (do_fast_age && dp->learning) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* The bridge layer put the port in BR_STATE_DISABLED when it left the
	 * bridge, so bring it back to BR_STATE_FORWARDING to keep the
	 * standalone port functional.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_tree_find_bridge_num(struct dsa_switch_tree *dst,
				    struct net_device *bridge_dev)
{
	struct dsa_port *dp;

	/* When preparing the offload for a port, it will have a valid
	 * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
	 * However there might be other ports having the same dp->bridge_dev
	 * and a valid dp->bridge_num, so just ignore this port.
	 */
	list_for_each_entry(dp, &dst->ports, list)
		if (dp->bridge_dev == bridge_dev && dp->bridge_num != -1)
			return dp->bridge_num;

	return -1;
}

static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
					     struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst = dp->ds->dst;
	int bridge_num = dp->bridge_num;
	struct dsa_switch *ds = dp->ds;

	/* No bridge TX forwarding offload => do nothing */
	if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
		return;

	dp->bridge_num = -1;

	/* Check if the bridge is still in use, otherwise it is time
	 * to clean it up so we can reuse this bridge_num later.
	 */
	if (dsa_tree_find_bridge_num(dst, bridge_dev) < 0)
		clear_bit(bridge_num, &dst->fwd_offloading_bridges);

	/* Notify the chips only once the offload has been deactivated, so
	 * that they can update their configuration accordingly.
	 */
	ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
					      bridge_num);
}

static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
					   struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct dsa_switch *ds = dp->ds;
	int bridge_num, err;

	if (!ds->ops->port_bridge_tx_fwd_offload)
		return false;

	bridge_num = dsa_tree_find_bridge_num(dst, bridge_dev);
	if (bridge_num < 0) {
		/* First port that offloads TX forwarding for this bridge */
		bridge_num = find_first_zero_bit(&dst->fwd_offloading_bridges,
						 DSA_MAX_NUM_OFFLOADING_BRIDGES);
		if (bridge_num >= ds->num_fwd_offloading_bridges)
			return false;

		set_bit(bridge_num, &dst->fwd_offloading_bridges);
	}

	dp->bridge_num = bridge_num;

	/* Notify the driver */
	err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
						  bridge_num);
	if (err) {
		dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
		return false;
	}

	return true;
}

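/**
 * dsa_port_bridge_join - Offload a port joining a bridge
 * @dp: port that was enslaved to the bridge
 * @br: bridge master device
 * @extack: netlink extended ack for error reporting
 *
 * Notify the fabric of the new bridge member, optionally enable bridge TX
 * forwarding offload, register the switchdev bridge port, and synchronize
 * the current bridge attributes (brport flags, STP state, VLAN filtering,
 * ageing time) to the hardware. All steps are rolled back on error.
 */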
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	bool tx_fwd_offload;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	brport_dev = dsa_port_to_bridge_port(dp);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dp->bridge_dev = NULL;
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	dsa_port_bridge_tx_fwd_unoffload(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	dsa_port_switchdev_unsync_attrs(dp);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

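/**
 * dsa_port_lag_join - Offload a port joining a LAG
 * @dp: port that was enslaved to the LAG device
 * @lag: LAG master device
 * @uinfo: information about the type of aggregation
 * @extack: netlink extended ack for error reporting
 *
 * Map the LAG in the tree and notify the drivers. If the LAG itself is
 * already enslaved to a bridge, also join that bridge on behalf of the port.
 */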
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	if (dp->bridge_dev)
		dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if
			 * the bridge device or the VID is not found.
			 * Returning 0 means the VID exists in the bridge,
			 * which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

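/**
 * dsa_port_vlan_filtering - Change the VLAN awareness of a port
 * @dp: port whose bridge requested the change
 * @vlan_filtering: whether VLAN awareness should be enabled
 * @extack: netlink extended ack for error reporting
 *
 * The change is refused if it would leave the switch in an inconsistent
 * state, for example when there are conflicting 8021q uppers, or when
 * another bridge on a switch with global VLAN filtering uses a different
 * setting.
 */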
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

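/**
 * dsa_port_bridge_flags - Apply bridge port flags to a port
 * @dp: port to configure
 * @flags: mask and values of the BR_* flags to change
 * @extack: netlink extended ack for error reporting
 *
 * Pass the flags to the driver and keep track of the address learning
 * state. When learning is turned off, the dynamic FDB entries of the port
 * are flushed.
 */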
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if (dp->learning && !learning)
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.targeted_match = targeted_match,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

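/**
 * dsa_port_host_fdb_add - Install an FDB entry pointing towards the host
 * @dp: port on behalf of which the address is installed
 * @addr: MAC address
 * @vid: VLAN ID
 *
 * Add the address to the RX filter of the CPU port's master interface, then
 * notify the fabric so that the entry is installed towards the CPU port.
 */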
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_add(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_del(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

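/**
 * dsa_port_link_register_of - Set up the link of a port described in OF
 * @dp: port to set up
 *
 * Register a phylink instance based on the port's device tree node, or fall
 * back to a fixed link/legacy PHY setup for drivers that still implement the
 * deprecated adjust_link callback.
 */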
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};

	return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n",
		       ERR_PTR(err));
}