Merge tag 'batadv-next-pullrequest-20210819' of git://git.open-mesh.org/linux-merge
[linux-2.6-microblaze.git] / net / dsa / port.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *      Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13
14 #include "dsa_priv.h"
15
16 /**
17  * dsa_port_notify - Notify the switching fabric of changes to a port
18  * @dp: port on which change occurred
19  * @e: event, must be of type DSA_NOTIFIER_*
20  * @v: event-specific value.
21  *
22  * Notify all switches in the DSA tree that this port's switch belongs to,
23  * including this switch itself, of an event. Allows the other switches to
24  * reconfigure themselves for cross-chip operations. Can also be used to
25  * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26  * a user port's state changes.
27  */
28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29 {
30         return dsa_tree_notify(dp->ds->dst, e, v);
31 }
32
33 static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
34 {
35         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
36         struct switchdev_notifier_fdb_info info = {
37                 /* flush all VLANs */
38                 .vid = 0,
39         };
40
41         /* When the port becomes standalone it has already left the bridge.
42          * Don't notify the bridge in that case.
43          */
44         if (!brport_dev)
45                 return;
46
47         call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
48                                  brport_dev, &info.info, NULL);
49 }
50
51 static void dsa_port_fast_age(const struct dsa_port *dp)
52 {
53         struct dsa_switch *ds = dp->ds;
54
55         if (!ds->ops->port_fast_age)
56                 return;
57
58         ds->ops->port_fast_age(ds, dp->index);
59
60         dsa_port_notify_bridge_fdb_flush(dp);
61 }
62
63 static bool dsa_port_can_configure_learning(struct dsa_port *dp)
64 {
65         struct switchdev_brport_flags flags = {
66                 .mask = BR_LEARNING,
67         };
68         struct dsa_switch *ds = dp->ds;
69         int err;
70
71         if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
72                 return false;
73
74         err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
75         return !err;
76 }
77
78 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
79 {
80         struct dsa_switch *ds = dp->ds;
81         int port = dp->index;
82
83         if (!ds->ops->port_stp_state_set)
84                 return -EOPNOTSUPP;
85
86         ds->ops->port_stp_state_set(ds, port, state);
87
88         if (!dsa_port_can_configure_learning(dp) ||
89             (do_fast_age && dp->learning)) {
90                 /* Fast age FDB entries or flush appropriate forwarding database
91                  * for the given port, if we are moving it from Learning or
92                  * Forwarding state, to Disabled or Blocking or Listening state.
93                  * Ports that were standalone before the STP state change don't
94                  * need to fast age the FDB, since address learning is off in
95                  * standalone mode.
96                  */
97
98                 if ((dp->stp_state == BR_STATE_LEARNING ||
99                      dp->stp_state == BR_STATE_FORWARDING) &&
100                     (state == BR_STATE_DISABLED ||
101                      state == BR_STATE_BLOCKING ||
102                      state == BR_STATE_LISTENING))
103                         dsa_port_fast_age(dp);
104         }
105
106         dp->stp_state = state;
107
108         return 0;
109 }
110
111 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
112                                    bool do_fast_age)
113 {
114         int err;
115
116         err = dsa_port_set_state(dp, state, do_fast_age);
117         if (err)
118                 pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
119 }
120
121 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
122 {
123         struct dsa_switch *ds = dp->ds;
124         int port = dp->index;
125         int err;
126
127         if (ds->ops->port_enable) {
128                 err = ds->ops->port_enable(ds, port, phy);
129                 if (err)
130                         return err;
131         }
132
133         if (!dp->bridge_dev)
134                 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
135
136         if (dp->pl)
137                 phylink_start(dp->pl);
138
139         return 0;
140 }
141
142 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
143 {
144         int err;
145
146         rtnl_lock();
147         err = dsa_port_enable_rt(dp, phy);
148         rtnl_unlock();
149
150         return err;
151 }
152
153 void dsa_port_disable_rt(struct dsa_port *dp)
154 {
155         struct dsa_switch *ds = dp->ds;
156         int port = dp->index;
157
158         if (dp->pl)
159                 phylink_stop(dp->pl);
160
161         if (!dp->bridge_dev)
162                 dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
163
164         if (ds->ops->port_disable)
165                 ds->ops->port_disable(ds, port);
166 }
167
168 void dsa_port_disable(struct dsa_port *dp)
169 {
170         rtnl_lock();
171         dsa_port_disable_rt(dp);
172         rtnl_unlock();
173 }
174
175 static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
176                                          struct netlink_ext_ack *extack)
177 {
178         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
179                                    BR_BCAST_FLOOD;
180         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
181         int flag, err;
182
183         for_each_set_bit(flag, &mask, 32) {
184                 struct switchdev_brport_flags flags = {0};
185
186                 flags.mask = BIT(flag);
187
188                 if (br_port_flag_is_set(brport_dev, BIT(flag)))
189                         flags.val = BIT(flag);
190
191                 err = dsa_port_bridge_flags(dp, flags, extack);
192                 if (err && err != -EOPNOTSUPP)
193                         return err;
194         }
195
196         return 0;
197 }
198
199 static void dsa_port_clear_brport_flags(struct dsa_port *dp)
200 {
201         const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
202         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
203                                    BR_BCAST_FLOOD;
204         int flag, err;
205
206         for_each_set_bit(flag, &mask, 32) {
207                 struct switchdev_brport_flags flags = {0};
208
209                 flags.mask = BIT(flag);
210                 flags.val = val & BIT(flag);
211
212                 err = dsa_port_bridge_flags(dp, flags, NULL);
213                 if (err && err != -EOPNOTSUPP)
214                         dev_err(dp->ds->dev,
215                                 "failed to clear bridge port flag %lu: %pe\n",
216                                 flags.val, ERR_PTR(err));
217         }
218 }
219
220 static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
221                                          struct netlink_ext_ack *extack)
222 {
223         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
224         struct net_device *br = dp->bridge_dev;
225         int err;
226
227         err = dsa_port_inherit_brport_flags(dp, extack);
228         if (err)
229                 return err;
230
231         err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
232         if (err && err != -EOPNOTSUPP)
233                 return err;
234
235         err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
236         if (err && err != -EOPNOTSUPP)
237                 return err;
238
239         err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
240         if (err && err != -EOPNOTSUPP)
241                 return err;
242
243         return 0;
244 }
245
246 static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
247 {
248         /* Configure the port for standalone mode (no address learning,
249          * flood everything).
250          * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
251          * when the user requests it through netlink or sysfs, but not
252          * automatically at port join or leave, so we need to handle resetting
253          * the brport flags ourselves. But we even prefer it that way, because
254          * otherwise, some setups might never get the notification they need,
255          * for example, when a port leaves a LAG that offloads the bridge,
256          * it becomes standalone, but as far as the bridge is concerned, no
257          * port ever left.
258          */
259         dsa_port_clear_brport_flags(dp);
260
261         /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
262          * so allow it to be in BR_STATE_FORWARDING to be kept functional
263          */
264         dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
265
266         /* VLAN filtering is handled by dsa_switch_bridge_leave */
267
268         /* Ageing time may be global to the switch chip, so don't change it
269          * here because we have no good reason (or value) to change it to.
270          */
271 }
272
273 static int dsa_tree_find_bridge_num(struct dsa_switch_tree *dst,
274                                     struct net_device *bridge_dev)
275 {
276         struct dsa_port *dp;
277
278         /* When preparing the offload for a port, it will have a valid
279          * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
280          * However there might be other ports having the same dp->bridge_dev
281          * and a valid dp->bridge_num, so just ignore this port.
282          */
283         list_for_each_entry(dp, &dst->ports, list)
284                 if (dp->bridge_dev == bridge_dev && dp->bridge_num != -1)
285                         return dp->bridge_num;
286
287         return -1;
288 }
289
290 static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
291                                              struct net_device *bridge_dev)
292 {
293         struct dsa_switch_tree *dst = dp->ds->dst;
294         int bridge_num = dp->bridge_num;
295         struct dsa_switch *ds = dp->ds;
296
297         /* No bridge TX forwarding offload => do nothing */
298         if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
299                 return;
300
301         dp->bridge_num = -1;
302
303         /* Check if the bridge is still in use, otherwise it is time
304          * to clean it up so we can reuse this bridge_num later.
305          */
306         if (!dsa_tree_find_bridge_num(dst, bridge_dev))
307                 clear_bit(bridge_num, &dst->fwd_offloading_bridges);
308
309         /* Notify the chips only once the offload has been deactivated, so
310          * that they can update their configuration accordingly.
311          */
312         ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
313                                               bridge_num);
314 }
315
316 static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
317                                            struct net_device *bridge_dev)
318 {
319         struct dsa_switch_tree *dst = dp->ds->dst;
320         struct dsa_switch *ds = dp->ds;
321         int bridge_num, err;
322
323         if (!ds->ops->port_bridge_tx_fwd_offload)
324                 return false;
325
326         bridge_num = dsa_tree_find_bridge_num(dst, bridge_dev);
327         if (bridge_num < 0) {
328                 /* First port that offloads TX forwarding for this bridge */
329                 bridge_num = find_first_zero_bit(&dst->fwd_offloading_bridges,
330                                                  DSA_MAX_NUM_OFFLOADING_BRIDGES);
331                 if (bridge_num >= ds->num_fwd_offloading_bridges)
332                         return false;
333
334                 set_bit(bridge_num, &dst->fwd_offloading_bridges);
335         }
336
337         dp->bridge_num = bridge_num;
338
339         /* Notify the driver */
340         err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
341                                                   bridge_num);
342         if (err) {
343                 dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
344                 return false;
345         }
346
347         return true;
348 }
349
350 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
351                          struct netlink_ext_ack *extack)
352 {
353         struct dsa_notifier_bridge_info info = {
354                 .tree_index = dp->ds->dst->index,
355                 .sw_index = dp->ds->index,
356                 .port = dp->index,
357                 .br = br,
358         };
359         struct net_device *dev = dp->slave;
360         struct net_device *brport_dev;
361         bool tx_fwd_offload;
362         int err;
363
364         /* Here the interface is already bridged. Reflect the current
365          * configuration so that drivers can program their chips accordingly.
366          */
367         dp->bridge_dev = br;
368
369         brport_dev = dsa_port_to_bridge_port(dp);
370
371         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
372         if (err)
373                 goto out_rollback;
374
375         tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);
376
377         err = switchdev_bridge_port_offload(brport_dev, dev, dp,
378                                             &dsa_slave_switchdev_notifier,
379                                             &dsa_slave_switchdev_blocking_notifier,
380                                             tx_fwd_offload, extack);
381         if (err)
382                 goto out_rollback_unbridge;
383
384         err = dsa_port_switchdev_sync_attrs(dp, extack);
385         if (err)
386                 goto out_rollback_unoffload;
387
388         return 0;
389
390 out_rollback_unoffload:
391         switchdev_bridge_port_unoffload(brport_dev, dp,
392                                         &dsa_slave_switchdev_notifier,
393                                         &dsa_slave_switchdev_blocking_notifier);
394 out_rollback_unbridge:
395         dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
396 out_rollback:
397         dp->bridge_dev = NULL;
398         return err;
399 }
400
401 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
402 {
403         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
404
405         switchdev_bridge_port_unoffload(brport_dev, dp,
406                                         &dsa_slave_switchdev_notifier,
407                                         &dsa_slave_switchdev_blocking_notifier);
408 }
409
410 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
411 {
412         struct dsa_notifier_bridge_info info = {
413                 .tree_index = dp->ds->dst->index,
414                 .sw_index = dp->ds->index,
415                 .port = dp->index,
416                 .br = br,
417         };
418         int err;
419
420         /* Here the port is already unbridged. Reflect the current configuration
421          * so that drivers can program their chips accordingly.
422          */
423         dp->bridge_dev = NULL;
424
425         dsa_port_bridge_tx_fwd_unoffload(dp, br);
426
427         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
428         if (err)
429                 dev_err(dp->ds->dev,
430                         "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
431                         dp->index, ERR_PTR(err));
432
433         dsa_port_switchdev_unsync_attrs(dp);
434 }
435
436 int dsa_port_lag_change(struct dsa_port *dp,
437                         struct netdev_lag_lower_state_info *linfo)
438 {
439         struct dsa_notifier_lag_info info = {
440                 .sw_index = dp->ds->index,
441                 .port = dp->index,
442         };
443         bool tx_enabled;
444
445         if (!dp->lag_dev)
446                 return 0;
447
448         /* On statically configured aggregates (e.g. loadbalance
449          * without LACP) ports will always be tx_enabled, even if the
450          * link is down. Thus we require both link_up and tx_enabled
451          * in order to include it in the tx set.
452          */
453         tx_enabled = linfo->link_up && linfo->tx_enabled;
454
455         if (tx_enabled == dp->lag_tx_enabled)
456                 return 0;
457
458         dp->lag_tx_enabled = tx_enabled;
459
460         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
461 }
462
463 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
464                       struct netdev_lag_upper_info *uinfo,
465                       struct netlink_ext_ack *extack)
466 {
467         struct dsa_notifier_lag_info info = {
468                 .sw_index = dp->ds->index,
469                 .port = dp->index,
470                 .lag = lag,
471                 .info = uinfo,
472         };
473         struct net_device *bridge_dev;
474         int err;
475
476         dsa_lag_map(dp->ds->dst, lag);
477         dp->lag_dev = lag;
478
479         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
480         if (err)
481                 goto err_lag_join;
482
483         bridge_dev = netdev_master_upper_dev_get(lag);
484         if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
485                 return 0;
486
487         err = dsa_port_bridge_join(dp, bridge_dev, extack);
488         if (err)
489                 goto err_bridge_join;
490
491         return 0;
492
493 err_bridge_join:
494         dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
495 err_lag_join:
496         dp->lag_dev = NULL;
497         dsa_lag_unmap(dp->ds->dst, lag);
498         return err;
499 }
500
501 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
502 {
503         if (dp->bridge_dev)
504                 dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
505 }
506
507 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
508 {
509         struct dsa_notifier_lag_info info = {
510                 .sw_index = dp->ds->index,
511                 .port = dp->index,
512                 .lag = lag,
513         };
514         int err;
515
516         if (!dp->lag_dev)
517                 return;
518
519         /* Port might have been part of a LAG that in turn was
520          * attached to a bridge.
521          */
522         if (dp->bridge_dev)
523                 dsa_port_bridge_leave(dp, dp->bridge_dev);
524
525         dp->lag_tx_enabled = false;
526         dp->lag_dev = NULL;
527
528         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
529         if (err)
530                 dev_err(dp->ds->dev,
531                         "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
532                         dp->index, ERR_PTR(err));
533
534         dsa_lag_unmap(dp->ds->dst, lag);
535 }
536
537 /* Must be called under rcu_read_lock() */
538 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
539                                               bool vlan_filtering,
540                                               struct netlink_ext_ack *extack)
541 {
542         struct dsa_switch *ds = dp->ds;
543         int err, i;
544
545         /* VLAN awareness was off, so the question is "can we turn it on".
546          * We may have had 8021q uppers, those need to go. Make sure we don't
547          * enter an inconsistent state: deny changing the VLAN awareness state
548          * as long as we have 8021q uppers.
549          */
550         if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
551                 struct net_device *upper_dev, *slave = dp->slave;
552                 struct net_device *br = dp->bridge_dev;
553                 struct list_head *iter;
554
555                 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
556                         struct bridge_vlan_info br_info;
557                         u16 vid;
558
559                         if (!is_vlan_dev(upper_dev))
560                                 continue;
561
562                         vid = vlan_dev_vlan_id(upper_dev);
563
564                         /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
565                          * device, respectively the VID is not found, returning
566                          * 0 means success, which is a failure for us here.
567                          */
568                         err = br_vlan_get_info(br, vid, &br_info);
569                         if (err == 0) {
570                                 NL_SET_ERR_MSG_MOD(extack,
571                                                    "Must first remove VLAN uppers having VIDs also present in bridge");
572                                 return false;
573                         }
574                 }
575         }
576
577         if (!ds->vlan_filtering_is_global)
578                 return true;
579
580         /* For cases where enabling/disabling VLAN awareness is global to the
581          * switch, we need to handle the case where multiple bridges span
582          * different ports of the same switch device and one of them has a
583          * different setting than what is being requested.
584          */
585         for (i = 0; i < ds->num_ports; i++) {
586                 struct net_device *other_bridge;
587
588                 other_bridge = dsa_to_port(ds, i)->bridge_dev;
589                 if (!other_bridge)
590                         continue;
591                 /* If it's the same bridge, it also has same
592                  * vlan_filtering setting => no need to check
593                  */
594                 if (other_bridge == dp->bridge_dev)
595                         continue;
596                 if (br_vlan_enabled(other_bridge) != vlan_filtering) {
597                         NL_SET_ERR_MSG_MOD(extack,
598                                            "VLAN filtering is a global setting");
599                         return false;
600                 }
601         }
602         return true;
603 }
604
605 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
606                             struct netlink_ext_ack *extack)
607 {
608         struct dsa_switch *ds = dp->ds;
609         bool apply;
610         int err;
611
612         if (!ds->ops->port_vlan_filtering)
613                 return -EOPNOTSUPP;
614
615         /* We are called from dsa_slave_switchdev_blocking_event(),
616          * which is not under rcu_read_lock(), unlike
617          * dsa_slave_switchdev_event().
618          */
619         rcu_read_lock();
620         apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
621         rcu_read_unlock();
622         if (!apply)
623                 return -EINVAL;
624
625         if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
626                 return 0;
627
628         err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
629                                            extack);
630         if (err)
631                 return err;
632
633         if (ds->vlan_filtering_is_global)
634                 ds->vlan_filtering = vlan_filtering;
635         else
636                 dp->vlan_filtering = vlan_filtering;
637
638         return 0;
639 }
640
641 /* This enforces legacy behavior for switch drivers which assume they can't
642  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
643  */
644 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
645 {
646         struct dsa_switch *ds = dp->ds;
647
648         if (!dp->bridge_dev)
649                 return false;
650
651         return (!ds->configure_vlan_while_not_filtering &&
652                 !br_vlan_enabled(dp->bridge_dev));
653 }
654
655 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
656 {
657         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
658         unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
659         struct dsa_notifier_ageing_time_info info;
660         int err;
661
662         info.ageing_time = ageing_time;
663
664         err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
665         if (err)
666                 return err;
667
668         dp->ageing_time = ageing_time;
669
670         return 0;
671 }
672
673 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
674                               struct switchdev_brport_flags flags,
675                               struct netlink_ext_ack *extack)
676 {
677         struct dsa_switch *ds = dp->ds;
678
679         if (!ds->ops->port_pre_bridge_flags)
680                 return -EINVAL;
681
682         return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
683 }
684
685 int dsa_port_bridge_flags(struct dsa_port *dp,
686                           struct switchdev_brport_flags flags,
687                           struct netlink_ext_ack *extack)
688 {
689         struct dsa_switch *ds = dp->ds;
690         int err;
691
692         if (!ds->ops->port_bridge_flags)
693                 return -EOPNOTSUPP;
694
695         err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
696         if (err)
697                 return err;
698
699         if (flags.mask & BR_LEARNING) {
700                 bool learning = flags.val & BR_LEARNING;
701
702                 if (learning == dp->learning)
703                         return 0;
704
705                 if ((dp->learning && !learning) &&
706                     (dp->stp_state == BR_STATE_LEARNING ||
707                      dp->stp_state == BR_STATE_FORWARDING))
708                         dsa_port_fast_age(dp);
709
710                 dp->learning = learning;
711         }
712
713         return 0;
714 }
715
716 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
717                         bool targeted_match)
718 {
719         struct dsa_notifier_mtu_info info = {
720                 .sw_index = dp->ds->index,
721                 .targeted_match = targeted_match,
722                 .port = dp->index,
723                 .mtu = new_mtu,
724         };
725
726         return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
727 }
728
729 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
730                      u16 vid)
731 {
732         struct dsa_notifier_fdb_info info = {
733                 .sw_index = dp->ds->index,
734                 .port = dp->index,
735                 .addr = addr,
736                 .vid = vid,
737         };
738
739         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
740 }
741
742 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
743                      u16 vid)
744 {
745         struct dsa_notifier_fdb_info info = {
746                 .sw_index = dp->ds->index,
747                 .port = dp->index,
748                 .addr = addr,
749                 .vid = vid,
750
751         };
752
753         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
754 }
755
756 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
757                           u16 vid)
758 {
759         struct dsa_notifier_fdb_info info = {
760                 .sw_index = dp->ds->index,
761                 .port = dp->index,
762                 .addr = addr,
763                 .vid = vid,
764         };
765         struct dsa_port *cpu_dp = dp->cpu_dp;
766         int err;
767
768         err = dev_uc_add(cpu_dp->master, addr);
769         if (err)
770                 return err;
771
772         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
773 }
774
775 int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
776                           u16 vid)
777 {
778         struct dsa_notifier_fdb_info info = {
779                 .sw_index = dp->ds->index,
780                 .port = dp->index,
781                 .addr = addr,
782                 .vid = vid,
783         };
784         struct dsa_port *cpu_dp = dp->cpu_dp;
785         int err;
786
787         err = dev_uc_del(cpu_dp->master, addr);
788         if (err)
789                 return err;
790
791         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
792 }
793
794 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
795 {
796         struct dsa_switch *ds = dp->ds;
797         int port = dp->index;
798
799         if (!ds->ops->port_fdb_dump)
800                 return -EOPNOTSUPP;
801
802         return ds->ops->port_fdb_dump(ds, port, cb, data);
803 }
804
805 int dsa_port_mdb_add(const struct dsa_port *dp,
806                      const struct switchdev_obj_port_mdb *mdb)
807 {
808         struct dsa_notifier_mdb_info info = {
809                 .sw_index = dp->ds->index,
810                 .port = dp->index,
811                 .mdb = mdb,
812         };
813
814         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
815 }
816
817 int dsa_port_mdb_del(const struct dsa_port *dp,
818                      const struct switchdev_obj_port_mdb *mdb)
819 {
820         struct dsa_notifier_mdb_info info = {
821                 .sw_index = dp->ds->index,
822                 .port = dp->index,
823                 .mdb = mdb,
824         };
825
826         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
827 }
828
829 int dsa_port_host_mdb_add(const struct dsa_port *dp,
830                           const struct switchdev_obj_port_mdb *mdb)
831 {
832         struct dsa_notifier_mdb_info info = {
833                 .sw_index = dp->ds->index,
834                 .port = dp->index,
835                 .mdb = mdb,
836         };
837         struct dsa_port *cpu_dp = dp->cpu_dp;
838         int err;
839
840         err = dev_mc_add(cpu_dp->master, mdb->addr);
841         if (err)
842                 return err;
843
844         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
845 }
846
847 int dsa_port_host_mdb_del(const struct dsa_port *dp,
848                           const struct switchdev_obj_port_mdb *mdb)
849 {
850         struct dsa_notifier_mdb_info info = {
851                 .sw_index = dp->ds->index,
852                 .port = dp->index,
853                 .mdb = mdb,
854         };
855         struct dsa_port *cpu_dp = dp->cpu_dp;
856         int err;
857
858         err = dev_mc_del(cpu_dp->master, mdb->addr);
859         if (err)
860                 return err;
861
862         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
863 }
864
865 int dsa_port_vlan_add(struct dsa_port *dp,
866                       const struct switchdev_obj_port_vlan *vlan,
867                       struct netlink_ext_ack *extack)
868 {
869         struct dsa_notifier_vlan_info info = {
870                 .sw_index = dp->ds->index,
871                 .port = dp->index,
872                 .vlan = vlan,
873                 .extack = extack,
874         };
875
876         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
877 }
878
879 int dsa_port_vlan_del(struct dsa_port *dp,
880                       const struct switchdev_obj_port_vlan *vlan)
881 {
882         struct dsa_notifier_vlan_info info = {
883                 .sw_index = dp->ds->index,
884                 .port = dp->index,
885                 .vlan = vlan,
886         };
887
888         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
889 }
890
891 int dsa_port_mrp_add(const struct dsa_port *dp,
892                      const struct switchdev_obj_mrp *mrp)
893 {
894         struct dsa_notifier_mrp_info info = {
895                 .sw_index = dp->ds->index,
896                 .port = dp->index,
897                 .mrp = mrp,
898         };
899
900         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
901 }
902
903 int dsa_port_mrp_del(const struct dsa_port *dp,
904                      const struct switchdev_obj_mrp *mrp)
905 {
906         struct dsa_notifier_mrp_info info = {
907                 .sw_index = dp->ds->index,
908                 .port = dp->index,
909                 .mrp = mrp,
910         };
911
912         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
913 }
914
915 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
916                                const struct switchdev_obj_ring_role_mrp *mrp)
917 {
918         struct dsa_notifier_mrp_ring_role_info info = {
919                 .sw_index = dp->ds->index,
920                 .port = dp->index,
921                 .mrp = mrp,
922         };
923
924         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
925 }
926
927 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
928                                const struct switchdev_obj_ring_role_mrp *mrp)
929 {
930         struct dsa_notifier_mrp_ring_role_info info = {
931                 .sw_index = dp->ds->index,
932                 .port = dp->index,
933                 .mrp = mrp,
934         };
935
936         return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
937 }
938
939 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
940                                const struct dsa_device_ops *tag_ops)
941 {
942         cpu_dp->rcv = tag_ops->rcv;
943         cpu_dp->tag_ops = tag_ops;
944 }
945
946 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
947 {
948         struct device_node *phy_dn;
949         struct phy_device *phydev;
950
951         phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
952         if (!phy_dn)
953                 return NULL;
954
955         phydev = of_phy_find_device(phy_dn);
956         if (!phydev) {
957                 of_node_put(phy_dn);
958                 return ERR_PTR(-EPROBE_DEFER);
959         }
960
961         of_node_put(phy_dn);
962         return phydev;
963 }
964
965 static void dsa_port_phylink_validate(struct phylink_config *config,
966                                       unsigned long *supported,
967                                       struct phylink_link_state *state)
968 {
969         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
970         struct dsa_switch *ds = dp->ds;
971
972         if (!ds->ops->phylink_validate)
973                 return;
974
975         ds->ops->phylink_validate(ds, dp->index, supported, state);
976 }
977
978 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
979                                                struct phylink_link_state *state)
980 {
981         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
982         struct dsa_switch *ds = dp->ds;
983         int err;
984
985         /* Only called for inband modes */
986         if (!ds->ops->phylink_mac_link_state) {
987                 state->link = 0;
988                 return;
989         }
990
991         err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
992         if (err < 0) {
993                 dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
994                         dp->index, err);
995                 state->link = 0;
996         }
997 }
998
999 static void dsa_port_phylink_mac_config(struct phylink_config *config,
1000                                         unsigned int mode,
1001                                         const struct phylink_link_state *state)
1002 {
1003         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1004         struct dsa_switch *ds = dp->ds;
1005
1006         if (!ds->ops->phylink_mac_config)
1007                 return;
1008
1009         ds->ops->phylink_mac_config(ds, dp->index, mode, state);
1010 }
1011
1012 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
1013 {
1014         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1015         struct dsa_switch *ds = dp->ds;
1016
1017         if (!ds->ops->phylink_mac_an_restart)
1018                 return;
1019
1020         ds->ops->phylink_mac_an_restart(ds, dp->index);
1021 }
1022
1023 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
1024                                            unsigned int mode,
1025                                            phy_interface_t interface)
1026 {
1027         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1028         struct phy_device *phydev = NULL;
1029         struct dsa_switch *ds = dp->ds;
1030
1031         if (dsa_is_user_port(ds, dp->index))
1032                 phydev = dp->slave->phydev;
1033
1034         if (!ds->ops->phylink_mac_link_down) {
1035                 if (ds->ops->adjust_link && phydev)
1036                         ds->ops->adjust_link(ds, dp->index, phydev);
1037                 return;
1038         }
1039
1040         ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1041 }
1042
1043 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
1044                                          struct phy_device *phydev,
1045                                          unsigned int mode,
1046                                          phy_interface_t interface,
1047                                          int speed, int duplex,
1048                                          bool tx_pause, bool rx_pause)
1049 {
1050         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1051         struct dsa_switch *ds = dp->ds;
1052
1053         if (!ds->ops->phylink_mac_link_up) {
1054                 if (ds->ops->adjust_link && phydev)
1055                         ds->ops->adjust_link(ds, dp->index, phydev);
1056                 return;
1057         }
1058
1059         ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
1060                                      speed, duplex, tx_pause, rx_pause);
1061 }
1062
1063 const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
1064         .validate = dsa_port_phylink_validate,
1065         .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
1066         .mac_config = dsa_port_phylink_mac_config,
1067         .mac_an_restart = dsa_port_phylink_mac_an_restart,
1068         .mac_link_down = dsa_port_phylink_mac_link_down,
1069         .mac_link_up = dsa_port_phylink_mac_link_up,
1070 };
1071
1072 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
1073 {
1074         struct dsa_switch *ds = dp->ds;
1075         struct phy_device *phydev;
1076         int port = dp->index;
1077         int err = 0;
1078
1079         phydev = dsa_port_get_phy_device(dp);
1080         if (!phydev)
1081                 return 0;
1082
1083         if (IS_ERR(phydev))
1084                 return PTR_ERR(phydev);
1085
1086         if (enable) {
1087                 err = genphy_resume(phydev);
1088                 if (err < 0)
1089                         goto err_put_dev;
1090
1091                 err = genphy_read_status(phydev);
1092                 if (err < 0)
1093                         goto err_put_dev;
1094         } else {
1095                 err = genphy_suspend(phydev);
1096                 if (err < 0)
1097                         goto err_put_dev;
1098         }
1099
1100         if (ds->ops->adjust_link)
1101                 ds->ops->adjust_link(ds, port, phydev);
1102
1103         dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
1104
1105 err_put_dev:
1106         put_device(&phydev->mdio.dev);
1107         return err;
1108 }
1109
1110 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1111 {
1112         struct device_node *dn = dp->dn;
1113         struct dsa_switch *ds = dp->ds;
1114         struct phy_device *phydev;
1115         int port = dp->index;
1116         phy_interface_t mode;
1117         int err;
1118
1119         err = of_phy_register_fixed_link(dn);
1120         if (err) {
1121                 dev_err(ds->dev,
1122                         "failed to register the fixed PHY of port %d\n",
1123                         port);
1124                 return err;
1125         }
1126
1127         phydev = of_phy_find_device(dn);
1128
1129         err = of_get_phy_mode(dn, &mode);
1130         if (err)
1131                 mode = PHY_INTERFACE_MODE_NA;
1132         phydev->interface = mode;
1133
1134         genphy_read_status(phydev);
1135
1136         if (ds->ops->adjust_link)
1137                 ds->ops->adjust_link(ds, port, phydev);
1138
1139         put_device(&phydev->mdio.dev);
1140
1141         return 0;
1142 }
1143
1144 static int dsa_port_phylink_register(struct dsa_port *dp)
1145 {
1146         struct dsa_switch *ds = dp->ds;
1147         struct device_node *port_dn = dp->dn;
1148         phy_interface_t mode;
1149         int err;
1150
1151         err = of_get_phy_mode(port_dn, &mode);
1152         if (err)
1153                 mode = PHY_INTERFACE_MODE_NA;
1154
1155         dp->pl_config.dev = ds->dev;
1156         dp->pl_config.type = PHYLINK_DEV;
1157         dp->pl_config.pcs_poll = ds->pcs_poll;
1158
1159         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
1160                                 mode, &dsa_port_phylink_mac_ops);
1161         if (IS_ERR(dp->pl)) {
1162                 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1163                 return PTR_ERR(dp->pl);
1164         }
1165
1166         err = phylink_of_phy_connect(dp->pl, port_dn, 0);
1167         if (err && err != -ENODEV) {
1168                 pr_err("could not attach to PHY: %d\n", err);
1169                 goto err_phy_connect;
1170         }
1171
1172         return 0;
1173
1174 err_phy_connect:
1175         phylink_destroy(dp->pl);
1176         return err;
1177 }
1178
1179 int dsa_port_link_register_of(struct dsa_port *dp)
1180 {
1181         struct dsa_switch *ds = dp->ds;
1182         struct device_node *phy_np;
1183         int port = dp->index;
1184
1185         if (!ds->ops->adjust_link) {
1186                 phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1187                 if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1188                         if (ds->ops->phylink_mac_link_down)
1189                                 ds->ops->phylink_mac_link_down(ds, port,
1190                                         MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1191                         return dsa_port_phylink_register(dp);
1192                 }
1193                 return 0;
1194         }
1195
1196         dev_warn(ds->dev,
1197                  "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1198
1199         if (of_phy_is_fixed_link(dp->dn))
1200                 return dsa_port_fixed_link_register_of(dp);
1201         else
1202                 return dsa_port_setup_phy_of(dp, true);
1203 }
1204
1205 void dsa_port_link_unregister_of(struct dsa_port *dp)
1206 {
1207         struct dsa_switch *ds = dp->ds;
1208
1209         if (!ds->ops->adjust_link && dp->pl) {
1210                 rtnl_lock();
1211                 phylink_disconnect_phy(dp->pl);
1212                 rtnl_unlock();
1213                 phylink_destroy(dp->pl);
1214                 dp->pl = NULL;
1215                 return;
1216         }
1217
1218         if (of_phy_is_fixed_link(dp->dn))
1219                 of_phy_deregister_fixed_link(dp->dn);
1220         else
1221                 dsa_port_setup_phy_of(dp, false);
1222 }
1223
1224 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
1225 {
1226         struct phy_device *phydev;
1227         int ret = -EOPNOTSUPP;
1228
1229         if (of_phy_is_fixed_link(dp->dn))
1230                 return ret;
1231
1232         phydev = dsa_port_get_phy_device(dp);
1233         if (IS_ERR_OR_NULL(phydev))
1234                 return ret;
1235
1236         ret = phy_ethtool_get_strings(phydev, data);
1237         put_device(&phydev->mdio.dev);
1238
1239         return ret;
1240 }
1241 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
1242
1243 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
1244 {
1245         struct phy_device *phydev;
1246         int ret = -EOPNOTSUPP;
1247
1248         if (of_phy_is_fixed_link(dp->dn))
1249                 return ret;
1250
1251         phydev = dsa_port_get_phy_device(dp);
1252         if (IS_ERR_OR_NULL(phydev))
1253                 return ret;
1254
1255         ret = phy_ethtool_get_stats(phydev, NULL, data);
1256         put_device(&phydev->mdio.dev);
1257
1258         return ret;
1259 }
1260 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
1261
1262 int dsa_port_get_phy_sset_count(struct dsa_port *dp)
1263 {
1264         struct phy_device *phydev;
1265         int ret = -EOPNOTSUPP;
1266
1267         if (of_phy_is_fixed_link(dp->dn))
1268                 return ret;
1269
1270         phydev = dsa_port_get_phy_device(dp);
1271         if (IS_ERR_OR_NULL(phydev))
1272                 return ret;
1273
1274         ret = phy_ethtool_get_sset_count(phydev);
1275         put_device(&phydev->mdio.dev);
1276
1277         return ret;
1278 }
1279 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
1280
1281 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1282 {
1283         struct dsa_notifier_hsr_info info = {
1284                 .sw_index = dp->ds->index,
1285                 .port = dp->index,
1286                 .hsr = hsr,
1287         };
1288         int err;
1289
1290         dp->hsr_dev = hsr;
1291
1292         err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
1293         if (err)
1294                 dp->hsr_dev = NULL;
1295
1296         return err;
1297 }
1298
1299 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1300 {
1301         struct dsa_notifier_hsr_info info = {
1302                 .sw_index = dp->ds->index,
1303                 .port = dp->index,
1304                 .hsr = hsr,
1305         };
1306         int err;
1307
1308         dp->hsr_dev = NULL;
1309
1310         err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
1311         if (err)
1312                 dev_err(dp->ds->dev,
1313                         "port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
1314                         dp->index, ERR_PTR(err));
1315 }
1316
1317 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
1318 {
1319         struct dsa_notifier_tag_8021q_vlan_info info = {
1320                 .tree_index = dp->ds->dst->index,
1321                 .sw_index = dp->ds->index,
1322                 .port = dp->index,
1323                 .vid = vid,
1324         };
1325
1326         if (broadcast)
1327                 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1328
1329         return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1330 }
1331
1332 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
1333 {
1334         struct dsa_notifier_tag_8021q_vlan_info info = {
1335                 .tree_index = dp->ds->dst->index,
1336                 .sw_index = dp->ds->index,
1337                 .port = dp->index,
1338                 .vid = vid,
1339         };
1340         int err;
1341
1342         if (broadcast)
1343                 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1344         else
1345                 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1346         if (err)
1347                 dev_err(dp->ds->dev,
1348                         "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
1349                         dp->index, vid, ERR_PTR(err));
1350 }