// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}
47 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
49 ds->slave_mii_bus->priv = (void *)ds;
50 ds->slave_mii_bus->name = "dsa slave smi";
51 ds->slave_mii_bus->read = dsa_slave_phy_read;
52 ds->slave_mii_bus->write = dsa_slave_phy_write;
53 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
54 ds->dst->index, ds->index);
55 ds->slave_mii_bus->parent = ds->dev;
56 ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
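/* Note on the phy_mask assignment above: mii_bus->phy_mask marks the
 * addresses that mdiobus scanning should skip, so inverting
 * ds->phys_mii_mask restricts probing to exactly the PHY addresses the
 * switch driver claims (e.g. phys_mii_mask = 0x1f exposes addresses 0-4).
 */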

/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
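/* The error unwind above releases resources in the reverse order they
 * were acquired: promiscuity first, then allmulti, then the unicast
 * filter entry that was added to the master for this port's MAC address.
 */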
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dsa_port_disable_rt(dp);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(master,
					 dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(master,
					    dev->flags & IFF_PROMISC ? 1 : -1);
	}
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
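/* The master interface performs RX filtering on behalf of all user
 * ports, so whenever a slave's MAC address differs from the master's, a
 * secondary unicast filter entry must be kept on the master (and stale
 * ones removed) for frames to that address to keep flowing up the stack.
 */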
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}
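/* FDB dumps are resumable: cb->args[2] holds the index at which the
 * previous netlink skb filled up, and dsa_slave_port_fdb_do_dump() skips
 * entries below that watermark on the next invocation.
 */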
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dp->bridge_dev)) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	err = dsa_port_vlan_add(dp, &vlan, extack);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, extack);
	if (err)
		return err;

	return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
}
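/* All traffic to and from a user port flows through the CPU port, which
 * is why every VLAN programmed above also includes it, and why
 * vlan_vid_add() is called on the master: its 802.1Q RX filter must
 * accept the tagged frames coming up from the switch.
 */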
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_slave_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, vlan);
	if (err)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return 0;
}
static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		/* DSA can directly translate this to a normal MDB del,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_slave_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKB for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
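/* Example of the padding math above: for a 46-byte frame sent through a
 * tagger with a 4-byte trailer, needed_tailroom starts at 4 for the tag
 * and grows by ETH_ZLEN (60) - 46 = 14 bytes of padding, so that after
 * eth_skb_pad() the trailer is still the last thing in the frame once
 * the master would otherwise have padded it.
 */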
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}
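/* TX summary: account the frame in software stats, take a hardware TX
 * timestamp if requested, make room for the tag, pad short frames for
 * tail taggers, let the tagging protocol add its header or trailer via
 * p->xmit(), and finally hand the tagged skb to the master interface
 * through dsa_enqueue_skb().
 */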

/* ethtool operations *******************************************************/
static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}
static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}
static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}
static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}
static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}
static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif
static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}
static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}
static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
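/* A minimal sketch of exercising the matchall mirror offload above from
 * user space (assuming user ports named swp0 and swp1):
 *
 *   tc qdisc add dev swp0 clsact
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *           action mirred egress mirror dev swp1
 */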
static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}
static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}
static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return vlan_vid_add(master, proto, vid);
}
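/* This ndo (and its kill counterpart below) back
 * NETIF_F_HW_VLAN_CTAG_FILTER, which dsa_slave_create() advertises when
 * the switch implements .port_vlan_add and .port_vlan_del, so e.g.
 * "ip link add link swp0 name swp0.100 type vlan id 100" ends up
 * programming VID 100 into the switch's VLAN table.
 */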
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	vlan_vid_del(master, proto, vid);

	return 0;
}
struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};
static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}
static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}
/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge_dev)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (other_dp->bridge_dev != dp->bridge_dev)
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *dp_iter;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	list_for_each_entry(dp_iter, &ds->dst->ports, list) {
		int slave_mtu;

		if (!dsa_port_is_user(dp_iter))
			continue;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!dp_iter->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp_iter == dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = dp_iter->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so create a non-targeted notifier which
		 * updates all switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, true);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops),
				    false);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
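/* Worked example of the arithmetic above: with a user port MTU of 1500
 * behind a tagger whose headroom plus tailroom overhead is 8 bytes (e.g.
 * EDSA), the master must carry 1500 + 8 = 1508 byte frames, so
 * new_master_mtu becomes 1508 and is checked against both the master's
 * and the slave's max_mtu before anything is applied.
 */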
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
};
/* legacy way, bypassing the bridge *****************************************/
static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			      struct net_device *dev,
			      const unsigned char *addr, u16 vid,
			      u16 flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			      struct net_device *dev,
			      const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}
static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->ds->devlink ? &dp->devlink_port : NULL;
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}
static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
				       struct net_device_path *path)
{
	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	path->dev = ctx->dev;
	path->type = DEV_PATH_DSA;
	path->dsa.proto = cpu_dp->tag_ops->proto;
	path->dsa.port = dp->index;
	ctx->dev = cpu_dp->master;

	return 0;
}
static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
	.ndo_change_mtu		= dsa_slave_change_mtu,
	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}
/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
				 u32 flags)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	slave_dev->phydev->dev_flags |= flags;

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	u32 phy_flags = 0;
	int ret;

	ret = of_get_phy_mode(port_dn, &mode);
	if (ret)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return ret;
}
void dsa_slave_setup_tagger(struct net_device *slave)
{
	struct dsa_port *dp = dsa_slave_to_port(slave);
	struct dsa_slave_priv *p = netdev_priv(slave);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	struct net_device *master = cpu_dp->master;

	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave->needed_headroom += master->needed_headroom;
	slave->needed_tailroom += master->needed_tailroom;

	p->xmit = cpu_dp->tag_ops->xmit;
}
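/* For example, a tagger that prepends an 8-byte header advertises
 * needed_headroom = 8; if the master itself needs 2 more bytes, the
 * slave advertises 10 in total, so skbs are usually allocated with
 * enough room for dsa_realloc_skb() to be a no-op on the hot path.
 */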
static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}
int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->features |= NETIF_F_LLTX;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!is_zero_ether_addr(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	rtnl_lock();
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	rtnl_unlock();
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}
bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}
static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag_dev)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}
static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	if (!dp->bridge_dev)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(dp->bridge_dev) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}
static int
dsa_slave_check_8021q_upper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br = dp->bridge_dev;
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
	 * device, respectively the VID is not found, returning
	 * 0 means success, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		struct dsa_switch *ds;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			return dsa_prevent_bridging_8021q_upper(dev, ptr);

		dp = dsa_slave_to_port(dev);
		ds = dp->ds;

		if (ds->ops->port_prechangeupper) {
			err = ds->ops->port_prechangeupper(ds, dp->index, info);
			if (err)
				return notifier_from_errno(err);
		}

		if (is_vlan_dev(info->upper_dev))
			return dsa_slave_check_8021q_upper(dev, ptr);
		break;
	}
	case NETDEV_CHANGEUPPER:
		if (dsa_slave_dev_check(dev))
			return dsa_slave_changeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_changeupper(dev, ptr);

		break;
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			break;

		dp = dsa_slave_to_port(dev);

		err = dsa_port_lag_change(dp, info->lower_state_info);
		return notifier_from_errno(err);
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_is_user_port(dp->ds, dp->index))
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}
static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct dsa_switch *ds = switchdev_work->ds;
	struct switchdev_notifier_fdb_info info;
	struct dsa_port *dp;

	if (!dsa_is_user_port(ds, switchdev_work->port))
		return;

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	dp = dsa_to_port(ds, switchdev_work->port);
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 dp->slave, &info.info, NULL);
}
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct dsa_switch *ds = switchdev_work->ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_to_port(ds, switchdev_work->port);

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = dsa_port_fdb_add(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = dsa_port_fdb_del(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);

		break;
	}
	rtnl_unlock();

	kfree(switchdev_work);
	if (dsa_is_user_port(ds, dp->index))
		dev_put(dp->slave);
}
static int dsa_lower_dev_walk(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	if (dsa_slave_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		return 1;
	}

	return 0;
}

static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);

	return (struct dsa_slave_priv *)priv.data;
}
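/* Given a bridge netdev, dsa_slave_dev_lower_find() returns the private
 * area of the first DSA slave found among its lower devices; this is how
 * FDB entries learnt on foreign (non-DSA) bridge ports get associated
 * with a switch below, in the switchdev event handler that follows.
 */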
/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	const struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = ptr;

		if (dsa_slave_dev_check(dev)) {
			if (!fdb_info->added_by_user || fdb_info->is_local)
				return NOTIFY_OK;

			dp = dsa_slave_to_port(dev);
		} else {
			/* Snoop addresses learnt on foreign interfaces
			 * bridged with us, for switches that don't
			 * automatically learn SA from CPU-injected traffic
			 */
			struct net_device *br_dev;
			struct dsa_slave_priv *p;

			br_dev = netdev_master_upper_dev_get_rcu(dev);
			if (!br_dev)
				return NOTIFY_DONE;

			if (!netif_is_bridge_master(br_dev))
				return NOTIFY_DONE;

			p = dsa_slave_dev_lower_find(br_dev);
			if (!p)
				return NOTIFY_DONE;

			dp = p->dp;

			if (!dp->ds->assisted_learning_on_cpu_port)
				return NOTIFY_DONE;

			/* When the bridge learns an address on an offloaded
			 * LAG we don't want to send traffic to the CPU, the
			 * other ports bridged with the LAG should be able to
			 * autonomously forward towards it.
			 */
			if (dsa_tree_offloads_bridge_port(dp->ds->dst, dev))
				return NOTIFY_DONE;
		}

		if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
			return NOTIFY_DONE;

		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		INIT_WORK(&switchdev_work->work,
			  dsa_slave_switchdev_event_work);
		switchdev_work->ds = dp->ds;
		switchdev_work->port = dp->index;
		switchdev_work->event = event;

		ether_addr_copy(switchdev_work->addr,
				fdb_info->addr);
		switchdev_work->vid = fdb_info->vid;

		/* Hold a reference on the slave for dsa_fdb_offload_notify */
		if (dsa_is_user_port(dp->ds, dp->index))
			dev_hold(dev);
		dsa_schedule_work(&switchdev_work->work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};
int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}
void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}