// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

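/* DSA creates one virtual netdev ("slave") per user-facing switch port,
 * stacked on top of the host Ethernet interface ("master") that is wired
 * to the switch's CPU port. For example, a three-port switch hanging off
 * eth0 typically shows up as lan1@eth0, lan2@eth0 and lan3@eth0, and all
 * of their traffic flows through eth0 carrying a switch-specific tag.
 */
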
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}

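/* On the slave MII bus above, accesses to PHY addresses outside
 * ds->phys_mii_mask are not forwarded to the driver; they return 0xffff
 * (no PHY present) and a successful no-op write, respectively.
 */
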
/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}

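/* dsa_slave_close() below undoes dsa_slave_open() in reverse order: the
 * port is disabled first, then the address filters and the allmulti and
 * promiscuity counts borrowed from the master are released.
 */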
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dsa_port_disable_rt(dp);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(master,
					 dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(master,
					    dev->flags & IFF_PROMISC ? 1 : -1);
	}
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

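/* The FDB dump path: dsa_port_fdb_dump() invokes the callback below once
 * per hardware FDB entry, and the callback translates each entry into an
 * RTM_NEWNEIGH netlink message for the 'bridge fdb show' dump.
 */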
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

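/* Hardware timestamping ioctls are forwarded to the switch driver, since
 * PTP timestamping is performed by the switch itself; all other ioctls
 * are treated as PHY MII ioctls and handled through phylink.
 */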
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int err;

	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dp->bridge_dev)) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	err = dsa_port_vlan_add(dp, &vlan, extack);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, extack);
	if (err)
		return err;

	return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
}

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
			return -EOPNOTSUPP;
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_add(dev, obj, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, vlan);
	if (err)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return 0;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
			return -EOPNOTSUPP;
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB deletion,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_del(dev, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

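/* Note the asymmetry with dsa_slave_vlan_add(): deletion only touches the
 * user port, because the CPU port's membership in the VLAN may still be
 * required by other user ports sharing that CPU port.
 */
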
static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
		DSA_SKB_CB(skb)->clone = clone;
		return;
	}

	kfree_skb(clone);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKB for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	DSA_SKB_CB(skb)->clone = NULL;

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

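/* Summary of the TX path above: account the packet in the software
 * counters, snapshot it for PTP if needed, make room for the switch tag
 * (and tail-tag padding), let the tagger's xmit() rewrite the frame, and
 * finally hand it to the master via dsa_enqueue_skb().
 */
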
/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

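/* The first four u64 slots are the software counters accumulated above;
 * driver-provided hardware counters start at data[4], matching the string
 * layout emitted by dsa_slave_get_strings().
 */
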
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds, dp->index, sset);

		return count;
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

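/* The mirred destination must itself be a DSA user port (checked via
 * dsa_slave_dev_check() above); the driver only receives the destination
 * as a local port index in mirror->to_local_port.
 */
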
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

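/* One callback implementation serves both clsact bind points; only the
 * "ingress" flag passed down to the classifier handlers differs between
 * the two wrappers selected above.
 */
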
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (type == TC_SETUP_BLOCK)
		return dsa_slave_setup_tc_block(dev, type_data);

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return vlan_vid_add(master, proto, vid);
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	vlan_vid_del(master, proto, vid);

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge_dev)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (other_dp->bridge_dev != dp->bridge_dev)
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

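/* MTU normalization matters on switches that set mtu_enforcement_ingress,
 * where bridging happens in hardware: a frame admitted on one user port
 * must also fit through every other port of the same bridge.
 */
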
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err, i;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (i = 0; i < ds->num_ports; i++) {
		int slave_mtu;

		if (!dsa_is_user_port(ds, i))
			continue;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!dsa_to_port(ds, i)->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (i == port)
			slave_mtu = new_mtu;
		else
			slave_mtu = dsa_to_port(ds, i)->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, false);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    cpu_dp->tag_ops->overhead,
				    true);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}

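/* On failure, the rollback above runs in reverse order of application:
 * first the CPU port MTU, then the master's MTU are restored.
 */
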
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};

/* legacy way, bypassing the bridge *****************************************/
static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			      struct net_device *dev,
			      const unsigned char *addr, u16 vid,
			      u16 flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			      struct net_device *dev,
			      const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->ds->devlink ? &dp->devlink_port : NULL;
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
	.ndo_change_mtu		= dsa_slave_change_mtu,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}

static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	u32 phy_flags = 0;
	int ret;

	ret = of_get_phy_mode(port_dn, &mode);
	if (ret)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return ret;
}

void dsa_slave_setup_tagger(struct net_device *slave)
{
	struct dsa_port *dp = dsa_slave_to_port(slave);
	struct dsa_slave_priv *p = netdev_priv(slave);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (cpu_dp->tag_ops->tail_tag)
		slave->needed_tailroom = cpu_dp->tag_ops->overhead;
	else
		slave->needed_headroom = cpu_dp->tag_ops->overhead;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave->needed_headroom += master->needed_headroom;
	slave->needed_tailroom += master->needed_tailroom;

	p->xmit = cpu_dp->tag_ops->xmit;
}

static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->features |= NETIF_F_LLTX;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!IS_ERR_OR_NULL(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	rtnl_lock();
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	rtnl_unlock();
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

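/* Ordering in dsa_slave_create() above is significant: the tagger must be
 * set up before the initial MTU is programmed (the tag overhead feeds into
 * the master's MTU), and the netdev is only registered after the PHY has
 * been successfully attached.
 */
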
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}

bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_slave_dev_check);

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag_dev)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	if (!dp->bridge_dev)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(dp->bridge_dev) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

static int
dsa_slave_check_8021q_upper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br = dp->bridge_dev;
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
	 * device, respectively the VID is not found, returning
	 * 0 means success, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		struct dsa_switch *ds;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			return dsa_prevent_bridging_8021q_upper(dev, ptr);

		dp = dsa_slave_to_port(dev);
		ds = dp->ds;

		if (ds->ops->port_prechangeupper) {
			err = ds->ops->port_prechangeupper(ds, dp->index, info);
			if (err)
				return notifier_from_errno(err);
		}

		if (is_vlan_dev(info->upper_dev))
			return dsa_slave_check_8021q_upper(dev, ptr);
		break;
	}
	case NETDEV_CHANGEUPPER:
		if (dsa_slave_dev_check(dev))
			return dsa_slave_changeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_changeupper(dev, ptr);

		break;
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			break;

		dp = dsa_slave_to_port(dev);

		err = dsa_port_lag_change(dp, info->lower_state_info);
		return notifier_from_errno(err);
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_is_user_port(dp->ds, dp->index))
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		break;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct dsa_switch *ds = switchdev_work->ds;
	struct switchdev_notifier_fdb_info info;
	struct dsa_port *dp;

	if (!dsa_is_user_port(ds, switchdev_work->port))
		return;

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	dp = dsa_to_port(ds, switchdev_work->port);
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 dp->slave, &info.info, NULL);
}

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct dsa_switch *ds = switchdev_work->ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_to_port(ds, switchdev_work->port);

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = dsa_port_fdb_add(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = dsa_port_fdb_del(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);

		break;
	}
	rtnl_unlock();

	kfree(switchdev_work);
	if (dsa_is_user_port(ds, dp->index))
		dev_put(dp->slave);
}

static int dsa_lower_dev_walk(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	if (dsa_slave_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		return 1;
	}

	return 0;
}

static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);

	return (struct dsa_slave_priv *)priv.data;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	const struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = ptr;

		if (dsa_slave_dev_check(dev)) {
			if (!fdb_info->added_by_user)
				return NOTIFY_OK;

			dp = dsa_slave_to_port(dev);
		} else {
			/* Snoop addresses learnt on foreign interfaces
			 * bridged with us, for switches that don't
			 * automatically learn SA from CPU-injected traffic
			 */
			struct net_device *br_dev;
			struct dsa_slave_priv *p;

			br_dev = netdev_master_upper_dev_get_rcu(dev);
			if (!br_dev)
				return NOTIFY_DONE;

			if (!netif_is_bridge_master(br_dev))
				return NOTIFY_DONE;

			p = dsa_slave_dev_lower_find(br_dev);
			if (!p)
				return NOTIFY_DONE;

			dp = p->dp->cpu_dp;

			if (!dp->ds->assisted_learning_on_cpu_port)
				return NOTIFY_DONE;

			/* When the bridge learns an address on an offloaded
			 * LAG we don't want to send traffic to the CPU, the
			 * other ports bridged with the LAG should be able to
			 * autonomously forward towards it.
			 */
			if (dsa_tree_offloads_netdev(dp->ds->dst, dev))
				return NOTIFY_DONE;
		}

		if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
			return NOTIFY_DONE;

		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		INIT_WORK(&switchdev_work->work,
			  dsa_slave_switchdev_event_work);
		switchdev_work->ds = dp->ds;
		switchdev_work->port = dp->index;
		switchdev_work->event = event;

		ether_addr_copy(switchdev_work->addr,
				fdb_info->addr);
		switchdev_work->vid = fdb_info->vid;

		/* Hold a reference on the slave for dsa_fdb_offload_notify */
		if (dsa_is_user_port(dp->ds, dp->index))
			dev_hold(dev);
		dsa_schedule_work(&switchdev_work->work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

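/* FDB events are delivered in atomic context, which is why the actual
 * hardware programming is deferred to dsa_slave_switchdev_event_work()
 * via dsa_schedule_work() above.
 */
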
static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};

int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}