Merge branches 'acpi-pm', 'acpi-pci', 'acpi-sysfs' and 'acpi-tables'
[linux-2.6-microblaze.git] / net / dsa / port.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *      Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13
14 #include "dsa_priv.h"
15
/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 *
 * Return: 0 on success, or the error propagated by dsa_tree_notify().
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}
32
/* Tell the software bridge that learned FDB entries for @vid on @dp have
 * been flushed in hardware (callers pass vid 0 to mean all VLANs), so the
 * bridge can drop its own copies.
 */
static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}
49
/* Flush the dynamically learned FDB entries of @dp in hardware via the
 * driver's port_fast_age op, then mirror the flush into the bridge.
 */
static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	/* Nothing to do (and no error to report) if the driver cannot
	 * flush on demand.
	 */
	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}
62
63 static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
64 {
65         struct dsa_switch *ds = dp->ds;
66         int err;
67
68         if (!ds->ops->port_vlan_fast_age)
69                 return -EOPNOTSUPP;
70
71         err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);
72
73         if (!err)
74                 dsa_port_notify_bridge_fdb_flush(dp, vid);
75
76         return err;
77 }
78
/* Fast age every VLAN mapped to MST instance @msti on @dp, stopping at
 * (and returning) the first error.
 */
static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	/* Ask the bridge which VIDs belong to this MST instance */
	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}
96
97 static bool dsa_port_can_configure_learning(struct dsa_port *dp)
98 {
99         struct switchdev_brport_flags flags = {
100                 .mask = BR_LEARNING,
101         };
102         struct dsa_switch *ds = dp->ds;
103         int err;
104
105         if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
106                 return false;
107
108         err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
109         return !err;
110 }
111
/**
 * dsa_port_set_state - program a new STP state for a port
 * @dp: port to configure
 * @state: BR_STATE_* value to apply
 * @do_fast_age: whether to flush learned addresses when moving out of a
 *	learning/forwarding state (only honored if learning was enabled)
 *
 * Return: 0 on success, -EOPNOTSUPP if the driver has no
 * port_stp_state_set op.
 */
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	/* If learning cannot be turned off, the hardware may have learned
	 * addresses regardless of dp->learning, so always consider ageing.
	 */
	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	/* Cache the state for the next transition's old-state checks */
	dp->stp_state = state;

	return 0;
}
144
/* Like dsa_port_set_state(), but for void call sites: failures are only
 * logged, not propagated.
 */
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}
154
/* Program the STP state of MST instance @state->msti on @dp, fast ageing
 * the instance's VLANs when it leaves a learning/forwarding state while
 * address learning was enabled.
 */
int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	/* Need the previous state to decide whether a flush is required */
	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	/* Only flush when learned addresses can actually be stale: learning
	 * was on and we moved from a learning state to a non-learning one.
	 */
	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	/* The state change itself succeeded; a failed flush is reported
	 * through extack only, hence the deliberate "return 0" below.
	 */
	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}
190
/* Enable @dp: driver port_enable op, STP Forwarding for non-bridged ports,
 * then phylink. The _rt variant expects the caller to already hold rtnl
 * (see the dsa_port_enable() wrapper).
 */
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	/* Bridged ports get their STP state from the bridge instead */
	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}
211
/* rtnl-locked wrapper around dsa_port_enable_rt() */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}
222
/* Disable @dp, undoing dsa_port_enable_rt() in exact reverse order:
 * phylink stop, STP Disabled for non-bridged ports, driver port_disable.
 * Expects rtnl to be held (see dsa_port_disable()).
 */
void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}
237
/* rtnl-locked wrapper around dsa_port_disable_rt() */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
244
/* Replay into hardware the bridge port flags the bridge already holds for
 * @dp, one flag at a time. -EOPNOTSUPP from the driver is tolerated so
 * that unsupported flags do not prevent the port from joining the bridge.
 */
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		/* Mirror the bridge's current value for this flag */
		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
268
/* Reset @dp's bridge port flags to standalone defaults: all flooding on,
 * learning and locking off. Runs on the leave path, which cannot fail, so
 * errors other than -EOPNOTSUPP are only logged.
 */
static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		/* Flags in val are re-enabled, the rest are cleared */
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}
289
/* Bring the hardware in sync with the bridge's view of @dp at join time:
 * bridge port flags, STP state, VLAN filtering and ageing time.
 * -EOPNOTSUPP means the driver does not implement an attribute and is
 * tolerated for everything but the brport flags.
 */
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}
315
/* Counterpart of dsa_port_switchdev_sync_attrs() on the leave path:
 * restore standalone-mode attributes on @dp.
 */
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}
342
/* Look up or allocate the refcounted dsa_bridge object for @br and attach
 * it to @dp. When the driver caps the number of offloadable bridges
 * (ds->max_num_bridges), exceeding the cap fails with -EOPNOTSUPP.
 */
static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	/* Another port of this tree may already offload @br; share it */
	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}
377
/* Detach @dp from its dsa_bridge; on the last reference, release the
 * bridge's reserved number (if any) and free it.
 */
static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}
393
394 static bool dsa_port_supports_mst(struct dsa_port *dp)
395 {
396         struct dsa_switch *ds = dp->ds;
397
398         return ds->ops->vlan_msti_set &&
399                 ds->ops->port_mst_state_set &&
400                 ds->ops->port_vlan_fast_age &&
401                 dsa_port_can_configure_learning(dp);
402 }
403
/* Offload bridge @br on @dp: create/refcount the dsa_bridge, notify the
 * whole fabric, register the switchdev notifiers on the bridge port and
 * sync the bridge attributes into hardware. Each step is rolled back in
 * reverse order on failure.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	/* Refuse to join an MST-enabled bridge we cannot offload */
	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	/* Drain switchdev work queued while the port was offloaded */
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}
461
/* First stage of leaving a bridge, run while @dp is still a bridge port:
 * unregister the switchdev notifiers and drain pending switchdev work.
 */
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}
476
/* Second stage of leaving bridge @br: drop the dsa_bridge reference,
 * notify the fabric and restore standalone attributes on @dp.
 */
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	/* Snapshot the bridge info before destroying our reference */
	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp);
}
507
/* React to a lower-state change of LAG member @dp: recompute whether the
 * port should be part of the TX set and, if that changed, notify the
 * fabric via DSA_NOTIFIER_LAG_CHANGE.
 */
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	/* Not a LAG member: nothing to update */
	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
534
/* Look up or allocate the refcounted dsa_lag object for @lag_dev and
 * attach it to @dp, mapping newly created LAGs into the tree.
 */
static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	/* Another port of this tree may already be a member; share it */
	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}
561
/* Detach @dp from its dsa_lag; on the last reference, unmap the LAG from
 * the tree and free it. The FDB list is expected to be empty by then.
 */
static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}
576
/* Join @dp to LAG @lag_dev: create/refcount the dsa_lag, notify the
 * fabric, and — if the LAG itself is already enslaved to a bridge — also
 * join that bridge. Rolls back in reverse order on failure.
 */
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	/* Done unless the LAG is itself a bridge port */
	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}
615
/* First stage of leaving a LAG: if the LAG was bridged, run the
 * pre-bridge-leave stage for @dp as well.
 */
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (!br)
		return;

	dsa_port_pre_bridge_leave(dp, br);
}
623
/* Second stage of leaving a LAG: leave the bridge the LAG belonged to (if
 * any), drop the dsa_lag reference and notify the fabric.
 */
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	/* Snapshot the LAG info before destroying our reference */
	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}
652
/* Decide whether @vlan_filtering can be applied to @dp, setting extack
 * with the reason when it cannot. Must be called under rcu_read_lock()
 * (it walks the port's upper devices via the RCU iterator).
 */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	/* Per-port setting: no cross-port constraints to check */
	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
719
720 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
721                             struct netlink_ext_ack *extack)
722 {
723         bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
724         struct dsa_switch *ds = dp->ds;
725         bool apply;
726         int err;
727
728         if (!ds->ops->port_vlan_filtering)
729                 return -EOPNOTSUPP;
730
731         /* We are called from dsa_slave_switchdev_blocking_event(),
732          * which is not under rcu_read_lock(), unlike
733          * dsa_slave_switchdev_event().
734          */
735         rcu_read_lock();
736         apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
737         rcu_read_unlock();
738         if (!apply)
739                 return -EINVAL;
740
741         if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
742                 return 0;
743
744         err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
745                                            extack);
746         if (err)
747                 return err;
748
749         if (ds->vlan_filtering_is_global) {
750                 struct dsa_port *other_dp;
751
752                 ds->vlan_filtering = vlan_filtering;
753
754                 dsa_switch_for_each_user_port(other_dp, ds) {
755                         struct net_device *slave = dp->slave;
756
757                         /* We might be called in the unbind path, so not
758                          * all slave devices might still be registered.
759                          */
760                         if (!slave)
761                                 continue;
762
763                         err = dsa_slave_manage_vlan_filtering(slave,
764                                                               vlan_filtering);
765                         if (err)
766                                 goto restore;
767                 }
768         } else {
769                 dp->vlan_filtering = vlan_filtering;
770
771                 err = dsa_slave_manage_vlan_filtering(dp->slave,
772                                                       vlan_filtering);
773                 if (err)
774                         goto restore;
775         }
776
777         return 0;
778
779 restore:
780         ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);
781
782         if (ds->vlan_filtering_is_global)
783                 ds->vlan_filtering = old_vlan_filtering;
784         else
785                 dp->vlan_filtering = old_vlan_filtering;
786
787         return err;
788 }
789
790 /* This enforces legacy behavior for switch drivers which assume they can't
791  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
792  */
793 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
794 {
795         struct net_device *br = dsa_port_bridge_dev_get(dp);
796         struct dsa_switch *ds = dp->ds;
797
798         if (!br)
799                 return false;
800
801         return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
802 }
803
804 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
805 {
806         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
807         unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
808         struct dsa_notifier_ageing_time_info info;
809         int err;
810
811         info.ageing_time = ageing_time;
812
813         err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
814         if (err)
815                 return err;
816
817         dp->ageing_time = ageing_time;
818
819         return 0;
820 }
821
822 int dsa_port_mst_enable(struct dsa_port *dp, bool on,
823                         struct netlink_ext_ack *extack)
824 {
825         if (on && !dsa_port_supports_mst(dp)) {
826                 NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
827                 return -EINVAL;
828         }
829
830         return 0;
831 }
832
/* Ask the driver whether it could offload @flags on @dp without applying
 * anything. NOTE(review): returns -EINVAL (not -EOPNOTSUPP like the other
 * op-missing checks in this file) when the op is absent — confirm callers
 * depend on that distinction before normalizing.
 */
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}
844
/* Apply bridge port @flags on @dp in hardware. Additionally tracks the
 * learning state in dp->learning, fast ageing the port when learning is
 * switched off while in a Learning/Forwarding STP state.
 */
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		/* Learning turned off: previously learned addresses can no
		 * longer be trusted, flush them if they could exist.
		 */
		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}
875
/* Forward a VLAN-to-MSTI binding to the driver. Dereferences dp->bridge,
 * so this assumes the port is bridged when called — NOTE(review): confirm
 * the switchdev callers guarantee that.
 */
int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}
886
/* Notify the fabric that @dp's MTU changed to @new_mtu. @targeted_match
 * is carried verbatim in the notifier info; its interpretation belongs to
 * the DSA_NOTIFIER_MTU handlers (not visible in this file).
 */
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.targeted_match = targeted_match,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}
899
/* Install @addr/@vid on @dp in its bridge's FDB database. Dereferences
 * dp->bridge, so the port must be bridged when this is called.
 */
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}
923
924 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
925                      u16 vid)
926 {
927         struct dsa_notifier_fdb_info info = {
928                 .sw_index = dp->ds->index,
929                 .port = dp->index,
930                 .addr = addr,
931                 .vid = vid,
932                 .db = {
933                         .type = DSA_DB_BRIDGE,
934                         .bridge = *dp->bridge,
935                 },
936         };
937
938         if (!dp->ds->fdb_isolation)
939                 info.db.bridge.num = 0;
940
941         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
942 }
943
944 static int dsa_port_host_fdb_add(struct dsa_port *dp,
945                                  const unsigned char *addr, u16 vid,
946                                  struct dsa_db db)
947 {
948         struct dsa_notifier_fdb_info info = {
949                 .sw_index = dp->ds->index,
950                 .port = dp->index,
951                 .addr = addr,
952                 .vid = vid,
953                 .db = db,
954         };
955
956         if (!dp->ds->fdb_isolation)
957                 info.db.bridge.num = 0;
958
959         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
960 }
961
962 int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
963                                      const unsigned char *addr, u16 vid)
964 {
965         struct dsa_db db = {
966                 .type = DSA_DB_PORT,
967                 .dp = dp,
968         };
969
970         return dsa_port_host_fdb_add(dp, addr, vid, db);
971 }
972
/* Install a host FDB entry for a bridged port, in the bridge's database.
 * Also adds @addr to the DSA master's unicast filter so the frame is not
 * dropped by the master's MAC before reaching the CPU port.
 */
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(cpu_dp->master, addr);
		if (err)
			return err;
	}

	/* NOTE(review): if the notifier below fails after dev_uc_add()
	 * succeeded, the master's UC filter entry is not rolled back here —
	 * confirm whether callers tolerate that.
	 */
	return dsa_port_host_fdb_add(dp, addr, vid, db);
}
995
996 static int dsa_port_host_fdb_del(struct dsa_port *dp,
997                                  const unsigned char *addr, u16 vid,
998                                  struct dsa_db db)
999 {
1000         struct dsa_notifier_fdb_info info = {
1001                 .sw_index = dp->ds->index,
1002                 .port = dp->index,
1003                 .addr = addr,
1004                 .vid = vid,
1005                 .db = db,
1006         };
1007
1008         if (!dp->ds->fdb_isolation)
1009                 info.db.bridge.num = 0;
1010
1011         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
1012 }
1013
1014 int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
1015                                      const unsigned char *addr, u16 vid)
1016 {
1017         struct dsa_db db = {
1018                 .type = DSA_DB_PORT,
1019                 .dp = dp,
1020         };
1021
1022         return dsa_port_host_fdb_del(dp, addr, vid, db);
1023 }
1024
/* Delete a bridged port's host FDB entry, mirroring
 * dsa_port_bridge_host_fdb_add(): drop @addr from the DSA master's unicast
 * filter first (only possible without touching promiscuity, i.e. when the
 * master supports unicast filtering), then notify the fabric.
 */
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}
1043
1044 int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
1045                          u16 vid)
1046 {
1047         struct dsa_notifier_lag_fdb_info info = {
1048                 .lag = dp->lag,
1049                 .addr = addr,
1050                 .vid = vid,
1051                 .db = {
1052                         .type = DSA_DB_BRIDGE,
1053                         .bridge = *dp->bridge,
1054                 },
1055         };
1056
1057         if (!dp->ds->fdb_isolation)
1058                 info.db.bridge.num = 0;
1059
1060         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
1061 }
1062
1063 int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
1064                          u16 vid)
1065 {
1066         struct dsa_notifier_lag_fdb_info info = {
1067                 .lag = dp->lag,
1068                 .addr = addr,
1069                 .vid = vid,
1070                 .db = {
1071                         .type = DSA_DB_BRIDGE,
1072                         .bridge = *dp->bridge,
1073                 },
1074         };
1075
1076         if (!dp->ds->fdb_isolation)
1077                 info.db.bridge.num = 0;
1078
1079         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
1080 }
1081
1082 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
1083 {
1084         struct dsa_switch *ds = dp->ds;
1085         int port = dp->index;
1086
1087         if (!ds->ops->port_fdb_dump)
1088                 return -EOPNOTSUPP;
1089
1090         return ds->ops->port_fdb_dump(ds, port, cb, data);
1091 }
1092
1093 int dsa_port_mdb_add(const struct dsa_port *dp,
1094                      const struct switchdev_obj_port_mdb *mdb)
1095 {
1096         struct dsa_notifier_mdb_info info = {
1097                 .sw_index = dp->ds->index,
1098                 .port = dp->index,
1099                 .mdb = mdb,
1100                 .db = {
1101                         .type = DSA_DB_BRIDGE,
1102                         .bridge = *dp->bridge,
1103                 },
1104         };
1105
1106         if (!dp->ds->fdb_isolation)
1107                 info.db.bridge.num = 0;
1108
1109         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
1110 }
1111
1112 int dsa_port_mdb_del(const struct dsa_port *dp,
1113                      const struct switchdev_obj_port_mdb *mdb)
1114 {
1115         struct dsa_notifier_mdb_info info = {
1116                 .sw_index = dp->ds->index,
1117                 .port = dp->index,
1118                 .mdb = mdb,
1119                 .db = {
1120                         .type = DSA_DB_BRIDGE,
1121                         .bridge = *dp->bridge,
1122                 },
1123         };
1124
1125         if (!dp->ds->fdb_isolation)
1126                 info.db.bridge.num = 0;
1127
1128         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
1129 }
1130
1131 static int dsa_port_host_mdb_add(const struct dsa_port *dp,
1132                                  const struct switchdev_obj_port_mdb *mdb,
1133                                  struct dsa_db db)
1134 {
1135         struct dsa_notifier_mdb_info info = {
1136                 .sw_index = dp->ds->index,
1137                 .port = dp->index,
1138                 .mdb = mdb,
1139                 .db = db,
1140         };
1141
1142         if (!dp->ds->fdb_isolation)
1143                 info.db.bridge.num = 0;
1144
1145         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
1146 }
1147
1148 int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
1149                                      const struct switchdev_obj_port_mdb *mdb)
1150 {
1151         struct dsa_db db = {
1152                 .type = DSA_DB_PORT,
1153                 .dp = dp,
1154         };
1155
1156         return dsa_port_host_mdb_add(dp, mdb, db);
1157 }
1158
/* Install a host MDB entry for a bridged port, in the bridge's database.
 * The group address is also added to the DSA master's multicast filter so
 * the master's MAC delivers the traffic to the CPU port.
 */
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	/* NOTE(review): a failure of the notifier below leaves the master's
	 * MC filter entry in place — confirm callers tolerate that.
	 */
	return dsa_port_host_mdb_add(dp, mdb, db);
}
1175
1176 static int dsa_port_host_mdb_del(const struct dsa_port *dp,
1177                                  const struct switchdev_obj_port_mdb *mdb,
1178                                  struct dsa_db db)
1179 {
1180         struct dsa_notifier_mdb_info info = {
1181                 .sw_index = dp->ds->index,
1182                 .port = dp->index,
1183                 .mdb = mdb,
1184                 .db = db,
1185         };
1186
1187         if (!dp->ds->fdb_isolation)
1188                 info.db.bridge.num = 0;
1189
1190         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
1191 }
1192
1193 int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
1194                                      const struct switchdev_obj_port_mdb *mdb)
1195 {
1196         struct dsa_db db = {
1197                 .type = DSA_DB_PORT,
1198                 .dp = dp,
1199         };
1200
1201         return dsa_port_host_mdb_del(dp, mdb, db);
1202 }
1203
/* Delete a bridged port's host MDB entry, mirroring
 * dsa_port_bridge_host_mdb_add(): remove the group address from the DSA
 * master's multicast filter, then notify the fabric.
 */
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}
1220
1221 int dsa_port_vlan_add(struct dsa_port *dp,
1222                       const struct switchdev_obj_port_vlan *vlan,
1223                       struct netlink_ext_ack *extack)
1224 {
1225         struct dsa_notifier_vlan_info info = {
1226                 .sw_index = dp->ds->index,
1227                 .port = dp->index,
1228                 .vlan = vlan,
1229                 .extack = extack,
1230         };
1231
1232         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
1233 }
1234
1235 int dsa_port_vlan_del(struct dsa_port *dp,
1236                       const struct switchdev_obj_port_vlan *vlan)
1237 {
1238         struct dsa_notifier_vlan_info info = {
1239                 .sw_index = dp->ds->index,
1240                 .port = dp->index,
1241                 .vlan = vlan,
1242         };
1243
1244         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
1245 }
1246
/* Program @vlan as a host VLAN (terminated by the CPU) for this port, and
 * add the VID to the DSA master's 8021q filter so VLAN-tagged traffic is
 * accepted by the master.
 */
int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Even when the switch does not offload host VLANs (-EOPNOTSUPP),
	 * the master's VLAN filter is still updated; note that err (0 or
	 * -EOPNOTSUPP) is deliberately propagated to the caller.
	 */
	vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}
1268
/* Remove a host VLAN for this port, mirroring dsa_port_host_vlan_add():
 * notify the fabric, then drop the VID from the DSA master's 8021q filter.
 */
int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* -EOPNOTSUPP is tolerated and propagated, like in the add path */
	vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}
1288
1289 int dsa_port_mrp_add(const struct dsa_port *dp,
1290                      const struct switchdev_obj_mrp *mrp)
1291 {
1292         struct dsa_switch *ds = dp->ds;
1293
1294         if (!ds->ops->port_mrp_add)
1295                 return -EOPNOTSUPP;
1296
1297         return ds->ops->port_mrp_add(ds, dp->index, mrp);
1298 }
1299
1300 int dsa_port_mrp_del(const struct dsa_port *dp,
1301                      const struct switchdev_obj_mrp *mrp)
1302 {
1303         struct dsa_switch *ds = dp->ds;
1304
1305         if (!ds->ops->port_mrp_del)
1306                 return -EOPNOTSUPP;
1307
1308         return ds->ops->port_mrp_del(ds, dp->index, mrp);
1309 }
1310
1311 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
1312                                const struct switchdev_obj_ring_role_mrp *mrp)
1313 {
1314         struct dsa_switch *ds = dp->ds;
1315
1316         if (!ds->ops->port_mrp_add_ring_role)
1317                 return -EOPNOTSUPP;
1318
1319         return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
1320 }
1321
1322 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
1323                                const struct switchdev_obj_ring_role_mrp *mrp)
1324 {
1325         struct dsa_switch *ds = dp->ds;
1326
1327         if (!ds->ops->port_mrp_del_ring_role)
1328                 return -EOPNOTSUPP;
1329
1330         return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
1331 }
1332
/* Set the tagging protocol operations on a CPU port. The rcv hook is
 * cached separately so the RX hot path saves one pointer dereference.
 * NOTE(review): the two stores are not published atomically — presumably
 * callers serialize against the data path (no traffic flowing) — confirm.
 */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}
1339
1340 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
1341 {
1342         struct device_node *phy_dn;
1343         struct phy_device *phydev;
1344
1345         phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
1346         if (!phy_dn)
1347                 return NULL;
1348
1349         phydev = of_phy_find_device(phy_dn);
1350         if (!phydev) {
1351                 of_node_put(phy_dn);
1352                 return ERR_PTR(-EPROBE_DEFER);
1353         }
1354
1355         of_node_put(phy_dn);
1356         return phydev;
1357 }
1358
1359 static void dsa_port_phylink_validate(struct phylink_config *config,
1360                                       unsigned long *supported,
1361                                       struct phylink_link_state *state)
1362 {
1363         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1364         struct dsa_switch *ds = dp->ds;
1365
1366         if (!ds->ops->phylink_validate) {
1367                 if (config->mac_capabilities)
1368                         phylink_generic_validate(config, supported, state);
1369                 return;
1370         }
1371
1372         ds->ops->phylink_validate(ds, dp->index, supported, state);
1373 }
1374
/* phylink .mac_pcs_get_state: read the current in-band link state from the
 * driver. Reports link down when the driver lacks the callback or fails.
 */
static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}
1395
1396 static struct phylink_pcs *
1397 dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
1398                                 phy_interface_t interface)
1399 {
1400         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1401         struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
1402         struct dsa_switch *ds = dp->ds;
1403
1404         if (ds->ops->phylink_mac_select_pcs)
1405                 pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
1406
1407         return pcs;
1408 }
1409
1410 static void dsa_port_phylink_mac_config(struct phylink_config *config,
1411                                         unsigned int mode,
1412                                         const struct phylink_link_state *state)
1413 {
1414         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1415         struct dsa_switch *ds = dp->ds;
1416
1417         if (!ds->ops->phylink_mac_config)
1418                 return;
1419
1420         ds->ops->phylink_mac_config(ds, dp->index, mode, state);
1421 }
1422
1423 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
1424 {
1425         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1426         struct dsa_switch *ds = dp->ds;
1427
1428         if (!ds->ops->phylink_mac_an_restart)
1429                 return;
1430
1431         ds->ops->phylink_mac_an_restart(ds, dp->index);
1432 }
1433
/* phylink .mac_link_down: tell the driver the link went down. Drivers
 * without the modern callback get the legacy PHYLIB-style adjust_link
 * call instead, when a PHY is attached.
 */
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	/* Only user ports have a slave net_device that carries a PHY */
	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
1453
/* phylink .mac_link_up: program the resolved link parameters (speed,
 * duplex, pause) into the MAC. Drivers without the modern callback fall
 * back to the legacy adjust_link call, when a PHY is attached.
 */
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}
1473
/* phylink MAC operations shared by all DSA ports; each hook dispatches to
 * the corresponding dsa_switch_ops callback of the port's switch.
 */
static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
1483
/* Create the phylink instance for a port, stored in dp->pl. The caller is
 * responsible for filling in dp->pl_config.dev and .type beforehand, and
 * for destroying the instance on teardown.
 */
int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	/* A missing/invalid "phy-mode" DT property is not fatal */
	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	/* Let the driver declare supported interfaces and MAC capabilities */
	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}
1513
/* Legacy (non-phylink) path: resume (@enable) or suspend the port's
 * OF-described PHY and let the driver react through adjust_link.
 * Returns 0 when the port has no "phy-handle", negative errno on failure.
 */
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	/* NOTE(review): printed on both enable and disable paths despite
	 * saying "enabled".
	 */
	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

	/* Drop the reference taken by dsa_port_get_phy_device() */
err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}
1551
1552 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1553 {
1554         struct device_node *dn = dp->dn;
1555         struct dsa_switch *ds = dp->ds;
1556         struct phy_device *phydev;
1557         int port = dp->index;
1558         phy_interface_t mode;
1559         int err;
1560
1561         err = of_phy_register_fixed_link(dn);
1562         if (err) {
1563                 dev_err(ds->dev,
1564                         "failed to register the fixed PHY of port %d\n",
1565                         port);
1566                 return err;
1567         }
1568
1569         phydev = of_phy_find_device(dn);
1570
1571         err = of_get_phy_mode(dn, &mode);
1572         if (err)
1573                 mode = PHY_INTERFACE_MODE_NA;
1574         phydev->interface = mode;
1575
1576         genphy_read_status(phydev);
1577
1578         if (ds->ops->adjust_link)
1579                 ds->ops->adjust_link(ds, port, phydev);
1580
1581         put_device(&phydev->mdio.dev);
1582
1583         return 0;
1584 }
1585
1586 static int dsa_port_phylink_register(struct dsa_port *dp)
1587 {
1588         struct dsa_switch *ds = dp->ds;
1589         struct device_node *port_dn = dp->dn;
1590         int err;
1591
1592         dp->pl_config.dev = ds->dev;
1593         dp->pl_config.type = PHYLINK_DEV;
1594
1595         err = dsa_port_phylink_create(dp);
1596         if (err)
1597                 return err;
1598
1599         err = phylink_of_phy_connect(dp->pl, port_dn, 0);
1600         if (err && err != -ENODEV) {
1601                 pr_err("could not attach to PHY: %d\n", err);
1602                 goto err_phy_connect;
1603         }
1604
1605         return 0;
1606
1607 err_phy_connect:
1608         phylink_destroy(dp->pl);
1609         return err;
1610 }
1611
/* Set up link management for a shared (CPU/DSA) port from its DT node.
 * Modern drivers (no adjust_link) get a phylink instance when the node
 * describes a fixed link or a PHY; legacy PHYLIB drivers keep using
 * adjust_link via the fixed-link/PHY-OF helpers.
 */
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			/* Force the link down before phylink takes over, so
			 * the port starts from a known state.
			 */
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			of_node_put(phy_np);
			return dsa_port_phylink_register(dp);
		}
		of_node_put(phy_np);
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}
1639
/* Undo dsa_port_link_register_of(): tear down the phylink instance of a
 * modern driver, or unwind the legacy fixed-link/PHY setup otherwise.
 */
void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	/* Modern driver with a phylink instance (dp->pl may be NULL when
	 * registration was skipped for a node without fixed-link/PHY)
	 */
	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}
1658
1659 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1660 {
1661         struct dsa_switch *ds = dp->ds;
1662         int err;
1663
1664         if (!ds->ops->port_hsr_join)
1665                 return -EOPNOTSUPP;
1666
1667         dp->hsr_dev = hsr;
1668
1669         err = ds->ops->port_hsr_join(ds, dp->index, hsr);
1670         if (err)
1671                 dp->hsr_dev = NULL;
1672
1673         return err;
1674 }
1675
1676 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1677 {
1678         struct dsa_switch *ds = dp->ds;
1679         int err;
1680
1681         dp->hsr_dev = NULL;
1682
1683         if (ds->ops->port_hsr_leave) {
1684                 err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
1685                 if (err)
1686                         dev_err(dp->ds->dev,
1687                                 "port %d failed to leave HSR %s: %pe\n",
1688                                 dp->index, hsr->name, ERR_PTR(err));
1689         }
1690 }
1691
1692 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
1693 {
1694         struct dsa_notifier_tag_8021q_vlan_info info = {
1695                 .tree_index = dp->ds->dst->index,
1696                 .sw_index = dp->ds->index,
1697                 .port = dp->index,
1698                 .vid = vid,
1699         };
1700
1701         if (broadcast)
1702                 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1703
1704         return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1705 }
1706
1707 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
1708 {
1709         struct dsa_notifier_tag_8021q_vlan_info info = {
1710                 .tree_index = dp->ds->dst->index,
1711                 .sw_index = dp->ds->index,
1712                 .port = dp->index,
1713                 .vid = vid,
1714         };
1715         int err;
1716
1717         if (broadcast)
1718                 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1719         else
1720                 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1721         if (err)
1722                 dev_err(dp->ds->dev,
1723                         "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
1724                         dp->index, vid, ERR_PTR(err));
1725 }