1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/dsa/slave.c - Slave device handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/tc_act/tc_mirred.h>
19 #include <linux/if_bridge.h>
20 #include <linux/netpoll.h>
21 #include <linux/ptp_classify.h>
25 /* slave mii_bus handling ***************************************************/
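/* Proxy MDIO accesses for the PHY addresses covered by ds->phys_mii_mask
 * to the switch driver's ->phy_read()/->phy_write() ops.
 */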
26 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
28 struct dsa_switch *ds = bus->priv;
30 if (ds->phys_mii_mask & (1 << addr))
31 return ds->ops->phy_read(ds, addr, reg);
36 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
38 struct dsa_switch *ds = bus->priv;
40 if (ds->phys_mii_mask & (1 << addr))
41 return ds->ops->phy_write(ds, addr, reg, val);
46 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
48 ds->slave_mii_bus->priv = (void *)ds;
49 ds->slave_mii_bus->name = "dsa slave smi";
50 ds->slave_mii_bus->read = dsa_slave_phy_read;
51 ds->slave_mii_bus->write = dsa_slave_phy_write;
52 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
53 ds->dst->index, ds->index);
54 ds->slave_mii_bus->parent = ds->dev;
55 ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
59 /* slave device handling ****************************************************/
60 static int dsa_slave_get_iflink(const struct net_device *dev)
62 return dsa_slave_to_master(dev)->ifindex;
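/* Opening a slave interface requires the DSA master to be up. The slave's
 * unicast address, allmulti and promiscuity settings are propagated to the
 * master so that frames for the slave are accepted on the conduit; each
 * step is unwound in reverse order if a later one fails.
 */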
65 static int dsa_slave_open(struct net_device *dev)
67 struct net_device *master = dsa_slave_to_master(dev);
68 struct dsa_port *dp = dsa_slave_to_port(dev);
71 if (!(master->flags & IFF_UP))
74 if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
75 err = dev_uc_add(master, dev->dev_addr);
80 if (dev->flags & IFF_ALLMULTI) {
81 err = dev_set_allmulti(master, 1);
85 if (dev->flags & IFF_PROMISC) {
86 err = dev_set_promiscuity(master, 1);
91 err = dsa_port_enable_rt(dp, dev->phydev);
98 if (dev->flags & IFF_PROMISC)
99 dev_set_promiscuity(master, -1);
101 if (dev->flags & IFF_ALLMULTI)
102 dev_set_allmulti(master, -1);
104 if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
105 dev_uc_del(master, dev->dev_addr);
110 static int dsa_slave_close(struct net_device *dev)
112 struct net_device *master = dsa_slave_to_master(dev);
113 struct dsa_port *dp = dsa_slave_to_port(dev);
115 dsa_port_disable_rt(dp);
117 dev_mc_unsync(master, dev);
118 dev_uc_unsync(master, dev);
119 if (dev->flags & IFF_ALLMULTI)
120 dev_set_allmulti(master, -1);
121 if (dev->flags & IFF_PROMISC)
122 dev_set_promiscuity(master, -1);
124 if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
125 dev_uc_del(master, dev->dev_addr);
130 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
132 struct net_device *master = dsa_slave_to_master(dev);
133 if (dev->flags & IFF_UP) {
134 if (change & IFF_ALLMULTI)
135 dev_set_allmulti(master,
136 dev->flags & IFF_ALLMULTI ? 1 : -1);
137 if (change & IFF_PROMISC)
138 dev_set_promiscuity(master,
139 dev->flags & IFF_PROMISC ? 1 : -1);
143 static void dsa_slave_set_rx_mode(struct net_device *dev)
145 struct net_device *master = dsa_slave_to_master(dev);
147 dev_mc_sync(master, dev);
148 dev_uc_sync(master, dev);
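/* Changing the MAC address of a running slave means updating the master's
 * unicast filter as well: add the new address, then drop the old one,
 * unless either happens to be the master's own address.
 */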
151 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
153 struct net_device *master = dsa_slave_to_master(dev);
154 struct sockaddr *addr = a;
157 if (!is_valid_ether_addr(addr->sa_data))
158 return -EADDRNOTAVAIL;
160 if (!(dev->flags & IFF_UP))
163 if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
164 err = dev_uc_add(master, addr->sa_data);
169 if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
170 dev_uc_del(master, dev->dev_addr);
173 ether_addr_copy(dev->dev_addr, addr->sa_data);
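/* .ndo_fdb_dump asks the switch driver to walk its FDB and calls
 * dsa_slave_port_fdb_do_dump() for every entry, encoding it as an
 * RTM_NEWNEIGH message on behalf of the slave interface.
 */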
178 struct dsa_slave_dump_ctx {
179 struct net_device *dev;
181 struct netlink_callback *cb;
186 dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
187 bool is_static, void *data)
189 struct dsa_slave_dump_ctx *dump = data;
190 u32 portid = NETLINK_CB(dump->cb->skb).portid;
191 u32 seq = dump->cb->nlh->nlmsg_seq;
192 struct nlmsghdr *nlh;
195 if (dump->idx < dump->cb->args[2])
198 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
199 sizeof(*ndm), NLM_F_MULTI);
203 ndm = nlmsg_data(nlh);
204 ndm->ndm_family = AF_BRIDGE;
207 ndm->ndm_flags = NTF_SELF;
209 ndm->ndm_ifindex = dump->dev->ifindex;
210 ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
212 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
213 goto nla_put_failure;
215 if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
216 goto nla_put_failure;
218 nlmsg_end(dump->skb, nlh);
225 nlmsg_cancel(dump->skb, nlh);
230 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
231 struct net_device *dev, struct net_device *filter_dev,
234 struct dsa_port *dp = dsa_slave_to_port(dev);
235 struct dsa_slave_dump_ctx dump = {
243 err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
249 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
251 struct dsa_slave_priv *p = netdev_priv(dev);
252 struct dsa_switch *ds = p->dp->ds;
253 int port = p->dp->index;
255 /* Pass through to switch driver if it supports timestamping */
258 if (ds->ops->port_hwtstamp_get)
259 return ds->ops->port_hwtstamp_get(ds, port, ifr);
262 if (ds->ops->port_hwtstamp_set)
263 return ds->ops->port_hwtstamp_set(ds, port, ifr);
267 return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
270 static int dsa_slave_port_attr_set(struct net_device *dev,
271 const struct switchdev_attr *attr)
273 struct dsa_port *dp = dsa_slave_to_port(dev);
276 if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
280 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
281 ret = dsa_port_set_state(dp, attr->u.stp_state);
283 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
284 ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering);
286 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
287 ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
289 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
290 ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags);
292 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
293 ret = dsa_port_bridge_flags(dp, attr->u.brport_flags);
295 case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
296 ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter);
306 /* Must be called under rcu_read_lock() */
308 dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
309 const struct switchdev_obj_port_vlan *vlan)
311 struct net_device *upper_dev;
312 struct list_head *iter;
314 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
317 if (!is_vlan_dev(upper_dev))
320 vid = vlan_dev_vlan_id(upper_dev);
321 if (vid == vlan->vid)
328 static int dsa_slave_vlan_add(struct net_device *dev,
329 const struct switchdev_obj *obj,
330 struct netlink_ext_ack *extack)
332 struct net_device *master = dsa_slave_to_master(dev);
333 struct dsa_port *dp = dsa_slave_to_port(dev);
334 struct switchdev_obj_port_vlan vlan;
337 if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
340 if (dsa_port_skip_vlan_configuration(dp)) {
341 NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
345 vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
347 /* Deny adding a bridge VLAN when there is already an 802.1Q upper with the same VID. */
350 if (br_vlan_enabled(dp->bridge_dev)) {
352 err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
358 err = dsa_port_vlan_add(dp, &vlan);
362 /* We need the dedicated CPU port to be a member of the VLAN as well.
363 * Even though drivers often handle CPU membership in special ways,
364 * it doesn't make sense to program a PVID, so clear this flag.
366 vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
368 err = dsa_port_vlan_add(dp->cpu_dp, &vlan);
372 return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
375 static int dsa_slave_port_obj_add(struct net_device *dev,
376 const struct switchdev_obj *obj,
377 struct netlink_ext_ack *extack)
379 struct dsa_port *dp = dsa_slave_to_port(dev);
383 case SWITCHDEV_OBJ_ID_PORT_MDB:
384 if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
386 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
388 case SWITCHDEV_OBJ_ID_HOST_MDB:
389 /* DSA can directly translate this to a normal MDB add,
390 * but on the CPU port.
392 err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
394 case SWITCHDEV_OBJ_ID_PORT_VLAN:
395 err = dsa_slave_vlan_add(dev, obj, extack);
405 static int dsa_slave_vlan_del(struct net_device *dev,
406 const struct switchdev_obj *obj)
408 struct net_device *master = dsa_slave_to_master(dev);
409 struct dsa_port *dp = dsa_slave_to_port(dev);
410 struct switchdev_obj_port_vlan *vlan;
413 if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
416 if (dsa_port_skip_vlan_configuration(dp))
419 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
421 /* Do not deprogram the CPU port as it may be shared with other user
422 * ports which can be members of this VLAN as well.
424 err = dsa_port_vlan_del(dp, vlan);
428 vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
433 static int dsa_slave_port_obj_del(struct net_device *dev,
434 const struct switchdev_obj *obj)
436 struct dsa_port *dp = dsa_slave_to_port(dev);
440 case SWITCHDEV_OBJ_ID_PORT_MDB:
441 if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
443 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
445 case SWITCHDEV_OBJ_ID_HOST_MDB:
446 /* DSA can directly translate this to a normal MDB deletion,
447 * but on the CPU port.
449 err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
451 case SWITCHDEV_OBJ_ID_PORT_VLAN:
452 err = dsa_slave_vlan_del(dev, obj);
462 static int dsa_slave_get_port_parent_id(struct net_device *dev,
463 struct netdev_phys_item_id *ppid)
465 struct dsa_port *dp = dsa_slave_to_port(dev);
466 struct dsa_switch *ds = dp->ds;
467 struct dsa_switch_tree *dst = ds->dst;
469 /* For non-legacy ports, devlink is used and it takes
470 * care of the name generation. This ndo implementation
471 * should be removed with legacy support.
476 ppid->id_len = sizeof(dst->index);
477 memcpy(&ppid->id, &dst->index, ppid->id_len);
482 static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
485 #ifdef CONFIG_NET_POLL_CONTROLLER
486 struct dsa_slave_priv *p = netdev_priv(dev);
488 return netpoll_send_skb(p->netpoll, skb);
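/* PTP hardware TX timestamping: classify the skb and, if the switch driver
 * implements ->port_txtstamp(), hand it a clone of the original skb. The
 * clone is stashed in DSA_SKB_CB() so the driver can deliver the timestamp
 * once the frame has actually left the port.
 */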
495 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
498 struct dsa_switch *ds = p->dp->ds;
499 struct sk_buff *clone;
502 type = ptp_classify_raw(skb);
503 if (type == PTP_CLASS_NONE)
506 if (!ds->ops->port_txtstamp)
509 clone = skb_clone_sk(skb);
513 if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
514 DSA_SKB_CB(skb)->clone = clone;
521 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
523 /* SKBs for netpoll still need to be mangled with the protocol-specific
524 * tag to be successfully transmitted
526 if (unlikely(netpoll_tx_running(dev)))
527 return dsa_slave_netpoll_send_skb(dev, skb);
529 /* Queue the SKB for transmission on the parent interface, but
530 * do not modify its EtherType
532 skb->dev = dsa_slave_to_master(dev);
537 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
539 static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
541 int needed_headroom = dev->needed_headroom;
542 int needed_tailroom = dev->needed_tailroom;
544 /* For tail taggers, we need to pad short frames ourselves, to ensure
545 * that the tail tag does not fail at its role of being at the end of
546 * the packet, once the master interface pads the frame. Account for
547 * that pad length here, and pad later.
549 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
550 needed_tailroom += ETH_ZLEN - skb->len;
551 /* skb_headroom() returns unsigned int... */
552 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
553 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
555 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
556 /* No reallocation needed, yay! */
559 return pskb_expand_head(skb, needed_headroom, needed_tailroom,
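/* Slave TX path: account the frame in the per-cpu software stats, give PTP
 * packets to the timestamping code, make sure there is room for the switch
 * tag (and for tail tag padding), apply the tag through p->xmit() and queue
 * the result on the DSA master.
 */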
563 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
565 struct dsa_slave_priv *p = netdev_priv(dev);
566 struct sk_buff *nskb;
568 dev_sw_netstats_tx_add(dev, 1, skb->len);
570 DSA_SKB_CB(skb)->clone = NULL;
572 /* Identify PTP protocol packets, clone them, and pass them to the
575 dsa_skb_tx_timestamp(p, skb);
577 if (dsa_realloc_skb(skb, dev)) {
578 dev_kfree_skb_any(skb);
582 /* needed_tailroom should still be 'warm' in the cache line from
583 * dsa_realloc_skb(), which has also ensured that padding is safe.
585 if (dev->needed_tailroom)
588 /* Transmit function may have to reallocate the original SKB,
589 * in which case it must have freed it. Only free it here on error.
591 nskb = p->xmit(skb, dev);
597 return dsa_enqueue_skb(nskb, dev);
600 /* ethtool operations *******************************************************/
602 static void dsa_slave_get_drvinfo(struct net_device *dev,
603 struct ethtool_drvinfo *drvinfo)
605 strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
606 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
607 strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
610 static int dsa_slave_get_regs_len(struct net_device *dev)
612 struct dsa_port *dp = dsa_slave_to_port(dev);
613 struct dsa_switch *ds = dp->ds;
615 if (ds->ops->get_regs_len)
616 return ds->ops->get_regs_len(ds, dp->index);
622 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
624 struct dsa_port *dp = dsa_slave_to_port(dev);
625 struct dsa_switch *ds = dp->ds;
627 if (ds->ops->get_regs)
628 ds->ops->get_regs(ds, dp->index, regs, _p);
631 static int dsa_slave_nway_reset(struct net_device *dev)
633 struct dsa_port *dp = dsa_slave_to_port(dev);
635 return phylink_ethtool_nway_reset(dp->pl);
638 static int dsa_slave_get_eeprom_len(struct net_device *dev)
640 struct dsa_port *dp = dsa_slave_to_port(dev);
641 struct dsa_switch *ds = dp->ds;
643 if (ds->cd && ds->cd->eeprom_len)
644 return ds->cd->eeprom_len;
646 if (ds->ops->get_eeprom_len)
647 return ds->ops->get_eeprom_len(ds);
652 static int dsa_slave_get_eeprom(struct net_device *dev,
653 struct ethtool_eeprom *eeprom, u8 *data)
655 struct dsa_port *dp = dsa_slave_to_port(dev);
656 struct dsa_switch *ds = dp->ds;
658 if (ds->ops->get_eeprom)
659 return ds->ops->get_eeprom(ds, eeprom, data);
664 static int dsa_slave_set_eeprom(struct net_device *dev,
665 struct ethtool_eeprom *eeprom, u8 *data)
667 struct dsa_port *dp = dsa_slave_to_port(dev);
668 struct dsa_switch *ds = dp->ds;
670 if (ds->ops->set_eeprom)
671 return ds->ops->set_eeprom(ds, eeprom, data);
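/* Statistics exposed through ethtool -S consist of four software counters
 * (tx_packets, tx_bytes, rx_packets, rx_bytes) kept by the DSA core,
 * followed by whatever hardware counters the switch driver reports via
 * ->get_strings()/->get_ethtool_stats()/->get_sset_count().
 */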
676 static void dsa_slave_get_strings(struct net_device *dev,
677 uint32_t stringset, uint8_t *data)
679 struct dsa_port *dp = dsa_slave_to_port(dev);
680 struct dsa_switch *ds = dp->ds;
682 if (stringset == ETH_SS_STATS) {
683 int len = ETH_GSTRING_LEN;
685 strncpy(data, "tx_packets", len);
686 strncpy(data + len, "tx_bytes", len);
687 strncpy(data + 2 * len, "rx_packets", len);
688 strncpy(data + 3 * len, "rx_bytes", len);
689 if (ds->ops->get_strings)
690 ds->ops->get_strings(ds, dp->index, stringset,
695 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
696 struct ethtool_stats *stats,
699 struct dsa_port *dp = dsa_slave_to_port(dev);
700 struct dsa_switch *ds = dp->ds;
701 struct pcpu_sw_netstats *s;
705 for_each_possible_cpu(i) {
706 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
708 s = per_cpu_ptr(dev->tstats, i);
710 start = u64_stats_fetch_begin_irq(&s->syncp);
711 tx_packets = s->tx_packets;
712 tx_bytes = s->tx_bytes;
713 rx_packets = s->rx_packets;
714 rx_bytes = s->rx_bytes;
715 } while (u64_stats_fetch_retry_irq(&s->syncp, start));
716 data[0] += tx_packets;
718 data[2] += rx_packets;
721 if (ds->ops->get_ethtool_stats)
722 ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
725 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
727 struct dsa_port *dp = dsa_slave_to_port(dev);
728 struct dsa_switch *ds = dp->ds;
730 if (sset == ETH_SS_STATS) {
734 if (ds->ops->get_sset_count)
735 count += ds->ops->get_sset_count(ds, dp->index, sset);
743 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
745 struct dsa_port *dp = dsa_slave_to_port(dev);
746 struct dsa_switch *ds = dp->ds;
748 phylink_ethtool_get_wol(dp->pl, w);
750 if (ds->ops->get_wol)
751 ds->ops->get_wol(ds, dp->index, w);
754 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
756 struct dsa_port *dp = dsa_slave_to_port(dev);
757 struct dsa_switch *ds = dp->ds;
758 int ret = -EOPNOTSUPP;
760 phylink_ethtool_set_wol(dp->pl, w);
762 if (ds->ops->set_wol)
763 ret = ds->ops->set_wol(ds, dp->index, w);
768 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
770 struct dsa_port *dp = dsa_slave_to_port(dev);
771 struct dsa_switch *ds = dp->ds;
774 /* Port's PHY and MAC both need to be EEE capable */
775 if (!dev->phydev || !dp->pl)
778 if (!ds->ops->set_mac_eee)
781 ret = ds->ops->set_mac_eee(ds, dp->index, e);
785 return phylink_ethtool_set_eee(dp->pl, e);
788 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
790 struct dsa_port *dp = dsa_slave_to_port(dev);
791 struct dsa_switch *ds = dp->ds;
794 /* Port's PHY and MAC both need to be EEE capable */
795 if (!dev->phydev || !dp->pl)
798 if (!ds->ops->get_mac_eee)
801 ret = ds->ops->get_mac_eee(ds, dp->index, e);
805 return phylink_ethtool_get_eee(dp->pl, e);
808 static int dsa_slave_get_link_ksettings(struct net_device *dev,
809 struct ethtool_link_ksettings *cmd)
811 struct dsa_port *dp = dsa_slave_to_port(dev);
813 return phylink_ethtool_ksettings_get(dp->pl, cmd);
816 static int dsa_slave_set_link_ksettings(struct net_device *dev,
817 const struct ethtool_link_ksettings *cmd)
819 struct dsa_port *dp = dsa_slave_to_port(dev);
821 return phylink_ethtool_ksettings_set(dp->pl, cmd);
824 static void dsa_slave_get_pauseparam(struct net_device *dev,
825 struct ethtool_pauseparam *pause)
827 struct dsa_port *dp = dsa_slave_to_port(dev);
829 phylink_ethtool_get_pauseparam(dp->pl, pause);
832 static int dsa_slave_set_pauseparam(struct net_device *dev,
833 struct ethtool_pauseparam *pause)
835 struct dsa_port *dp = dsa_slave_to_port(dev);
837 return phylink_ethtool_set_pauseparam(dp->pl, pause);
840 #ifdef CONFIG_NET_POLL_CONTROLLER
841 static int dsa_slave_netpoll_setup(struct net_device *dev,
842 struct netpoll_info *ni)
844 struct net_device *master = dsa_slave_to_master(dev);
845 struct dsa_slave_priv *p = netdev_priv(dev);
846 struct netpoll *netpoll;
849 netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
853 err = __netpoll_setup(netpoll, master);
859 p->netpoll = netpoll;
864 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
866 struct dsa_slave_priv *p = netdev_priv(dev);
867 struct netpoll *netpoll = p->netpoll;
874 __netpoll_free(netpoll);
877 static void dsa_slave_poll_controller(struct net_device *dev)
882 static int dsa_slave_get_phys_port_name(struct net_device *dev,
883 char *name, size_t len)
885 struct dsa_port *dp = dsa_slave_to_port(dev);
887 /* For non-legacy ports, devlink is used and it takes
888 * care of the name generation. This ndo implementation
889 * should be removed with legacy support.
894 if (snprintf(name, len, "p%d", dp->index) >= len)
900 static struct dsa_mall_tc_entry *
901 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
903 struct dsa_slave_priv *p = netdev_priv(dev);
904 struct dsa_mall_tc_entry *mall_tc_entry;
906 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
907 if (mall_tc_entry->cookie == cookie)
908 return mall_tc_entry;
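/* tc matchall offload: a matchall classifier with a single mirred action is
 * translated into a port mirror via ->port_mirror_add(), while one with a
 * single police action (ingress only) becomes a port policer via
 * ->port_policer_add().
 */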
914 dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
915 struct tc_cls_matchall_offload *cls,
918 struct dsa_port *dp = dsa_slave_to_port(dev);
919 struct dsa_slave_priv *p = netdev_priv(dev);
920 struct dsa_mall_mirror_tc_entry *mirror;
921 struct dsa_mall_tc_entry *mall_tc_entry;
922 struct dsa_switch *ds = dp->ds;
923 struct flow_action_entry *act;
924 struct dsa_port *to_dp;
927 if (!ds->ops->port_mirror_add)
930 if (!flow_action_basic_hw_stats_check(&cls->rule->action,
934 act = &cls->rule->action.entries[0];
939 if (!dsa_slave_dev_check(act->dev))
942 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
946 mall_tc_entry->cookie = cls->cookie;
947 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
948 mirror = &mall_tc_entry->mirror;
950 to_dp = dsa_slave_to_port(act->dev);
952 mirror->to_local_port = to_dp->index;
953 mirror->ingress = ingress;
955 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
957 kfree(mall_tc_entry);
961 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
967 dsa_slave_add_cls_matchall_police(struct net_device *dev,
968 struct tc_cls_matchall_offload *cls,
971 struct netlink_ext_ack *extack = cls->common.extack;
972 struct dsa_port *dp = dsa_slave_to_port(dev);
973 struct dsa_slave_priv *p = netdev_priv(dev);
974 struct dsa_mall_policer_tc_entry *policer;
975 struct dsa_mall_tc_entry *mall_tc_entry;
976 struct dsa_switch *ds = dp->ds;
977 struct flow_action_entry *act;
980 if (!ds->ops->port_policer_add) {
981 NL_SET_ERR_MSG_MOD(extack,
982 "Policing offload not implemented");
987 NL_SET_ERR_MSG_MOD(extack,
988 "Only supported on ingress qdisc");
992 if (!flow_action_basic_hw_stats_check(&cls->rule->action,
996 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
997 if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
998 NL_SET_ERR_MSG_MOD(extack,
999 "Only one port policer allowed");
1004 act = &cls->rule->action.entries[0];
1006 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1010 mall_tc_entry->cookie = cls->cookie;
1011 mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1012 policer = &mall_tc_entry->policer;
1013 policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1014 policer->burst = act->police.burst;
1016 err = ds->ops->port_policer_add(ds, dp->index, policer);
1018 kfree(mall_tc_entry);
1022 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1027 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1028 struct tc_cls_matchall_offload *cls,
1031 int err = -EOPNOTSUPP;
1033 if (cls->common.protocol == htons(ETH_P_ALL) &&
1034 flow_offload_has_one_action(&cls->rule->action) &&
1035 cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1036 err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1037 else if (flow_offload_has_one_action(&cls->rule->action) &&
1038 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1039 err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1044 static void dsa_slave_del_cls_matchall(struct net_device *dev,
1045 struct tc_cls_matchall_offload *cls)
1047 struct dsa_port *dp = dsa_slave_to_port(dev);
1048 struct dsa_mall_tc_entry *mall_tc_entry;
1049 struct dsa_switch *ds = dp->ds;
1051 mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
1055 list_del(&mall_tc_entry->list);
1057 switch (mall_tc_entry->type) {
1058 case DSA_PORT_MALL_MIRROR:
1059 if (ds->ops->port_mirror_del)
1060 ds->ops->port_mirror_del(ds, dp->index,
1061 &mall_tc_entry->mirror);
1063 case DSA_PORT_MALL_POLICER:
1064 if (ds->ops->port_policer_del)
1065 ds->ops->port_policer_del(ds, dp->index);
1071 kfree(mall_tc_entry);
1074 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1075 struct tc_cls_matchall_offload *cls,
1078 if (cls->common.chain_index)
1081 switch (cls->command) {
1082 case TC_CLSMATCHALL_REPLACE:
1083 return dsa_slave_add_cls_matchall(dev, cls, ingress);
1084 case TC_CLSMATCHALL_DESTROY:
1085 dsa_slave_del_cls_matchall(dev, cls);
1092 static int dsa_slave_add_cls_flower(struct net_device *dev,
1093 struct flow_cls_offload *cls,
1096 struct dsa_port *dp = dsa_slave_to_port(dev);
1097 struct dsa_switch *ds = dp->ds;
1098 int port = dp->index;
1100 if (!ds->ops->cls_flower_add)
1103 return ds->ops->cls_flower_add(ds, port, cls, ingress);
1106 static int dsa_slave_del_cls_flower(struct net_device *dev,
1107 struct flow_cls_offload *cls,
1110 struct dsa_port *dp = dsa_slave_to_port(dev);
1111 struct dsa_switch *ds = dp->ds;
1112 int port = dp->index;
1114 if (!ds->ops->cls_flower_del)
1117 return ds->ops->cls_flower_del(ds, port, cls, ingress);
1120 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1121 struct flow_cls_offload *cls,
1124 struct dsa_port *dp = dsa_slave_to_port(dev);
1125 struct dsa_switch *ds = dp->ds;
1126 int port = dp->index;
1128 if (!ds->ops->cls_flower_stats)
1131 return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1134 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1135 struct flow_cls_offload *cls,
1138 switch (cls->command) {
1139 case FLOW_CLS_REPLACE:
1140 return dsa_slave_add_cls_flower(dev, cls, ingress);
1141 case FLOW_CLS_DESTROY:
1142 return dsa_slave_del_cls_flower(dev, cls, ingress);
1143 case FLOW_CLS_STATS:
1144 return dsa_slave_stats_cls_flower(dev, cls, ingress);
1150 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1151 void *cb_priv, bool ingress)
1153 struct net_device *dev = cb_priv;
1155 if (!tc_can_offload(dev))
1159 case TC_SETUP_CLSMATCHALL:
1160 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1161 case TC_SETUP_CLSFLOWER:
1162 return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1168 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1169 void *type_data, void *cb_priv)
1171 return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
1174 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
1175 void *type_data, void *cb_priv)
1177 return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
1180 static LIST_HEAD(dsa_slave_block_cb_list);
1182 static int dsa_slave_setup_tc_block(struct net_device *dev,
1183 struct flow_block_offload *f)
1185 struct flow_block_cb *block_cb;
1186 flow_setup_cb_t *cb;
1188 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1189 cb = dsa_slave_setup_tc_block_cb_ig;
1190 else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1191 cb = dsa_slave_setup_tc_block_cb_eg;
1195 f->driver_block_list = &dsa_slave_block_cb_list;
1197 switch (f->command) {
1198 case FLOW_BLOCK_BIND:
1199 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1202 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1203 if (IS_ERR(block_cb))
1204 return PTR_ERR(block_cb);
1206 flow_block_cb_add(block_cb, f);
1207 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1209 case FLOW_BLOCK_UNBIND:
1210 block_cb = flow_block_cb_lookup(f->block, cb, dev);
1214 flow_block_cb_remove(block_cb, f);
1215 list_del(&block_cb->driver_list);
1222 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1225 struct dsa_port *dp = dsa_slave_to_port(dev);
1226 struct dsa_switch *ds = dp->ds;
1228 if (type == TC_SETUP_BLOCK)
1229 return dsa_slave_setup_tc_block(dev, type_data);
1231 if (!ds->ops->port_setup_tc)
1234 return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1237 static int dsa_slave_get_rxnfc(struct net_device *dev,
1238 struct ethtool_rxnfc *nfc, u32 *rule_locs)
1240 struct dsa_port *dp = dsa_slave_to_port(dev);
1241 struct dsa_switch *ds = dp->ds;
1243 if (!ds->ops->get_rxnfc)
1246 return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1249 static int dsa_slave_set_rxnfc(struct net_device *dev,
1250 struct ethtool_rxnfc *nfc)
1252 struct dsa_port *dp = dsa_slave_to_port(dev);
1253 struct dsa_switch *ds = dp->ds;
1255 if (!ds->ops->set_rxnfc)
1258 return ds->ops->set_rxnfc(ds, dp->index, nfc);
1261 static int dsa_slave_get_ts_info(struct net_device *dev,
1262 struct ethtool_ts_info *ts)
1264 struct dsa_slave_priv *p = netdev_priv(dev);
1265 struct dsa_switch *ds = p->dp->ds;
1267 if (!ds->ops->get_ts_info)
1270 return ds->ops->get_ts_info(ds, p->dp->index, ts);
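/* .ndo_vlan_rx_add_vid/.ndo_vlan_rx_kill_vid back the 8021q module: VIDs
 * used by VLAN uppers of the slave are installed as tagged, non-PVID
 * entries on the user port and its CPU port, and mirrored into the
 * master's VLAN filter. The CPU port entry is left alone on deletion,
 * since it may be shared with other user ports.
 */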
1273 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1276 struct net_device *master = dsa_slave_to_master(dev);
1277 struct dsa_port *dp = dsa_slave_to_port(dev);
1278 struct switchdev_obj_port_vlan vlan = {
1279 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1281 /* This API only allows programming tagged, non-PVID VIDs */
1287 ret = dsa_port_vlan_add(dp, &vlan);
1291 /* And CPU port... */
1292 ret = dsa_port_vlan_add(dp->cpu_dp, &vlan);
1296 return vlan_vid_add(master, proto, vid);
1299 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1302 struct net_device *master = dsa_slave_to_master(dev);
1303 struct dsa_port *dp = dsa_slave_to_port(dev);
1304 struct switchdev_obj_port_vlan vlan = {
1306 /* This API only allows programming tagged, non-PVID VIDs */
1311 /* Do not deprogram the CPU port as it may be shared with other user
1312 * ports which can be members of this VLAN as well.
1314 err = dsa_port_vlan_del(dp, &vlan);
1318 vlan_vid_del(master, proto, vid);
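/* Bookkeeping for MTU normalization: dsa_hw_port is a snapshot of a slave
 * and its old MTU, and dsa_hw_port_list_set_mtu() applies a single MTU to
 * every port on such a list, rolling all of them back to their old MTU if
 * any dev_set_mtu() call fails.
 */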
1323 struct dsa_hw_port {
1324 struct list_head list;
1325 struct net_device *dev;
1329 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1331 const struct dsa_hw_port *p;
1334 list_for_each_entry(p, hw_port_list, list) {
1335 if (p->dev->mtu == mtu)
1338 err = dev_set_mtu(p->dev, mtu);
1346 list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1347 if (p->dev->mtu == p->old_mtu)
1350 if (dev_set_mtu(p->dev, p->old_mtu))
1351 netdev_err(p->dev, "Failed to restore MTU\n");
1357 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1359 struct dsa_hw_port *p, *n;
1361 list_for_each_entry_safe(p, n, hw_port_list, list)
1365 /* Make the hardware datapath to/from @dev limited to a common MTU */
1366 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
1368 struct list_head hw_port_list;
1369 struct dsa_switch_tree *dst;
1370 int min_mtu = ETH_MAX_MTU;
1371 struct dsa_port *other_dp;
1374 if (!dp->ds->mtu_enforcement_ingress)
1377 if (!dp->bridge_dev)
1380 INIT_LIST_HEAD(&hw_port_list);
1382 /* Populate the list of ports that are part of the same bridge
1383 * as the newly added/modified port
1385 list_for_each_entry(dst, &dsa_tree_list, list) {
1386 list_for_each_entry(other_dp, &dst->ports, list) {
1387 struct dsa_hw_port *hw_port;
1388 struct net_device *slave;
1390 if (other_dp->type != DSA_PORT_TYPE_USER)
1393 if (other_dp->bridge_dev != dp->bridge_dev)
1396 if (!other_dp->ds->mtu_enforcement_ingress)
1399 slave = other_dp->slave;
1401 if (min_mtu > slave->mtu)
1402 min_mtu = slave->mtu;
1404 hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
1408 hw_port->dev = slave;
1409 hw_port->old_mtu = slave->mtu;
1411 list_add(&hw_port->list, &hw_port_list);
1415 /* Attempt to configure the entire hardware bridge to the newly added
1416 * interface's MTU first, regardless of whether the intention of the
1417 * user was to raise or lower it.
1419 err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
1423 /* Clearly that didn't work out so well, so just set the minimum MTU on
1424 * all hardware bridge ports now. If this fails too, then all ports will
1425 * still have their old MTU rolled back anyway.
1427 dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
1430 dsa_hw_port_list_free(&hw_port_list);
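/* Changing the MTU of a user port also requires the DSA master and the CPU
 * port to carry the largest slave MTU plus the tagger overhead. The new
 * value is checked against both dev->max_mtu and master->max_mtu,
 * propagated with dsa_port_mtu_change(), and unwound if any step fails.
 */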
1433 static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1435 struct net_device *master = dsa_slave_to_master(dev);
1436 struct dsa_port *dp = dsa_slave_to_port(dev);
1437 struct dsa_slave_priv *p = netdev_priv(dev);
1438 struct dsa_switch *ds = p->dp->ds;
1439 struct dsa_port *cpu_dp;
1440 int port = p->dp->index;
1441 int largest_mtu = 0;
1448 if (!ds->ops->port_change_mtu)
1451 for (i = 0; i < ds->num_ports; i++) {
1454 if (!dsa_is_user_port(ds, i))
1457 /* During probe, this function will be called for each slave
1458 * device, while not all of them have been allocated. That's
1459 * ok, it doesn't change what the maximum is, so ignore it.
1461 if (!dsa_to_port(ds, i)->slave)
1464 /* Pretend that we already applied the setting, which we
1465 * actually haven't (still haven't done all integrity checks)
1468 slave_mtu = new_mtu;
1470 slave_mtu = dsa_to_port(ds, i)->slave->mtu;
1472 if (largest_mtu < slave_mtu)
1473 largest_mtu = slave_mtu;
1476 cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1478 mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1479 old_master_mtu = master->mtu;
1480 new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
1481 if (new_master_mtu > mtu_limit)
1484 /* If the master MTU isn't over limit, there's no need to check the CPU
1485 * MTU, since that surely isn't either.
1487 cpu_mtu = largest_mtu;
1489 /* Start applying stuff */
1490 if (new_master_mtu != old_master_mtu) {
1491 err = dev_set_mtu(master, new_master_mtu);
1493 goto out_master_failed;
1495 /* We only need to propagate the MTU of the CPU port to
1496 * upstream switches.
1498 err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
1500 goto out_cpu_failed;
1503 err = dsa_port_mtu_change(dp, new_mtu, false);
1505 goto out_port_failed;
1509 dsa_bridge_mtu_normalization(dp);
1514 if (new_master_mtu != old_master_mtu)
1515 dsa_port_mtu_change(cpu_dp, old_master_mtu -
1516 cpu_dp->tag_ops->overhead,
1519 if (new_master_mtu != old_master_mtu)
1520 dev_set_mtu(master, old_master_mtu);
1525 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1526 .get_drvinfo = dsa_slave_get_drvinfo,
1527 .get_regs_len = dsa_slave_get_regs_len,
1528 .get_regs = dsa_slave_get_regs,
1529 .nway_reset = dsa_slave_nway_reset,
1530 .get_link = ethtool_op_get_link,
1531 .get_eeprom_len = dsa_slave_get_eeprom_len,
1532 .get_eeprom = dsa_slave_get_eeprom,
1533 .set_eeprom = dsa_slave_set_eeprom,
1534 .get_strings = dsa_slave_get_strings,
1535 .get_ethtool_stats = dsa_slave_get_ethtool_stats,
1536 .get_sset_count = dsa_slave_get_sset_count,
1537 .set_wol = dsa_slave_set_wol,
1538 .get_wol = dsa_slave_get_wol,
1539 .set_eee = dsa_slave_set_eee,
1540 .get_eee = dsa_slave_get_eee,
1541 .get_link_ksettings = dsa_slave_get_link_ksettings,
1542 .set_link_ksettings = dsa_slave_set_link_ksettings,
1543 .get_pauseparam = dsa_slave_get_pauseparam,
1544 .set_pauseparam = dsa_slave_set_pauseparam,
1545 .get_rxnfc = dsa_slave_get_rxnfc,
1546 .set_rxnfc = dsa_slave_set_rxnfc,
1547 .get_ts_info = dsa_slave_get_ts_info,
1550 /* legacy way, bypassing the bridge *****************************************/
1551 static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1552 struct net_device *dev,
1553 const unsigned char *addr, u16 vid,
1555 struct netlink_ext_ack *extack)
1557 struct dsa_port *dp = dsa_slave_to_port(dev);
1559 return dsa_port_fdb_add(dp, addr, vid);
1562 static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1563 struct net_device *dev,
1564 const unsigned char *addr, u16 vid)
1566 struct dsa_port *dp = dsa_slave_to_port(dev);
1568 return dsa_port_fdb_del(dp, addr, vid);
1571 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1573 struct dsa_port *dp = dsa_slave_to_port(dev);
1575 return dp->ds->devlink ? &dp->devlink_port : NULL;
1578 static void dsa_slave_get_stats64(struct net_device *dev,
1579 struct rtnl_link_stats64 *s)
1581 struct dsa_port *dp = dsa_slave_to_port(dev);
1582 struct dsa_switch *ds = dp->ds;
1584 if (ds->ops->get_stats64)
1585 ds->ops->get_stats64(ds, dp->index, s);
1587 dev_get_tstats64(dev, s);
1590 static const struct net_device_ops dsa_slave_netdev_ops = {
1591 .ndo_open = dsa_slave_open,
1592 .ndo_stop = dsa_slave_close,
1593 .ndo_start_xmit = dsa_slave_xmit,
1594 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
1595 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
1596 .ndo_set_mac_address = dsa_slave_set_mac_address,
1597 .ndo_fdb_add = dsa_legacy_fdb_add,
1598 .ndo_fdb_del = dsa_legacy_fdb_del,
1599 .ndo_fdb_dump = dsa_slave_fdb_dump,
1600 .ndo_do_ioctl = dsa_slave_ioctl,
1601 .ndo_get_iflink = dsa_slave_get_iflink,
1602 #ifdef CONFIG_NET_POLL_CONTROLLER
1603 .ndo_netpoll_setup = dsa_slave_netpoll_setup,
1604 .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
1605 .ndo_poll_controller = dsa_slave_poll_controller,
1607 .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
1608 .ndo_setup_tc = dsa_slave_setup_tc,
1609 .ndo_get_stats64 = dsa_slave_get_stats64,
1610 .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
1611 .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
1612 .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
1613 .ndo_get_devlink_port = dsa_slave_get_devlink_port,
1614 .ndo_change_mtu = dsa_slave_change_mtu,
1617 static struct device_type dsa_type = {
1621 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1623 const struct dsa_port *dp = dsa_to_port(ds, port);
1626 phylink_mac_change(dp->pl, up);
1628 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1630 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1631 struct phylink_link_state *state)
1633 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1634 struct dsa_switch *ds = dp->ds;
1636 /* No need to check that this operation is valid, the callback would
1637 * not be called if it was not.
1639 ds->ops->phylink_fixed_state(ds, dp->index, state);
1642 /* slave device setup *******************************************************/
1643 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1645 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1646 struct dsa_switch *ds = dp->ds;
1648 slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1649 if (!slave_dev->phydev) {
1650 netdev_err(slave_dev, "no phy at %d\n", addr);
1654 return phylink_connect_phy(dp->pl, slave_dev->phydev);
1657 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1659 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1660 struct device_node *port_dn = dp->dn;
1661 struct dsa_switch *ds = dp->ds;
1662 phy_interface_t mode;
1666 ret = of_get_phy_mode(port_dn, &mode);
1668 mode = PHY_INTERFACE_MODE_NA;
1670 dp->pl_config.dev = &slave_dev->dev;
1671 dp->pl_config.type = PHYLINK_NETDEV;
1673 /* The get_fixed_state callback takes precedence over polling the
1674 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
1675 * this if the switch provides such a callback.
1677 if (ds->ops->phylink_fixed_state) {
1678 dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
1679 dp->pl_config.poll_fixed_state = true;
1682 dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1683 &dsa_port_phylink_mac_ops);
1684 if (IS_ERR(dp->pl)) {
1685 netdev_err(slave_dev,
1686 "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1687 return PTR_ERR(dp->pl);
1690 if (ds->ops->get_phy_flags)
1691 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1693 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1694 if (ret == -ENODEV && ds->slave_mii_bus) {
1695 /* We could not connect to a designated PHY or SFP, so try to
1696 * use the switch internal MDIO bus instead
1698 ret = dsa_slave_phy_connect(slave_dev, dp->index);
1700 netdev_err(slave_dev,
1701 "failed to connect to port %d: %d\n",
1703 phylink_destroy(dp->pl);
1711 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1712 static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1713 struct netdev_queue *txq,
1716 lockdep_set_class(&txq->_xmit_lock,
1717 &dsa_slave_netdev_xmit_lock_key);
1720 int dsa_slave_suspend(struct net_device *slave_dev)
1722 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1724 if (!netif_running(slave_dev))
1727 netif_device_detach(slave_dev);
1730 phylink_stop(dp->pl);
1736 int dsa_slave_resume(struct net_device *slave_dev)
1738 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1740 if (!netif_running(slave_dev))
1743 netif_device_attach(slave_dev);
1746 phylink_start(dp->pl);
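/* dsa_slave_create() allocates the slave net_device, inherits features and
 * MAC address from the DSA master, reserves headroom/tailroom for the
 * tagging protocol, sets up per-cpu stats, GRO cells and phylink, then
 * registers the interface and links it as an upper device of the master.
 */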
1752 int dsa_slave_create(struct dsa_port *port)
1754 const struct dsa_port *cpu_dp = port->cpu_dp;
1755 struct net_device *master = cpu_dp->master;
1756 struct dsa_switch *ds = port->ds;
1757 const char *name = port->name;
1758 struct net_device *slave_dev;
1759 struct dsa_slave_priv *p;
1762 if (!ds->num_tx_queues)
1763 ds->num_tx_queues = 1;
1765 slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
1766 NET_NAME_UNKNOWN, ether_setup,
1767 ds->num_tx_queues, 1);
1768 if (slave_dev == NULL)
1771 slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
1772 if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
1773 slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1774 slave_dev->hw_features |= NETIF_F_HW_TC;
1775 slave_dev->features |= NETIF_F_LLTX;
1776 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1777 if (!IS_ERR_OR_NULL(port->mac))
1778 ether_addr_copy(slave_dev->dev_addr, port->mac);
1780 eth_hw_addr_inherit(slave_dev, master);
1781 slave_dev->priv_flags |= IFF_NO_QUEUE;
1782 slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1783 if (ds->ops->port_max_mtu)
1784 slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
1785 if (cpu_dp->tag_ops->tail_tag)
1786 slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead;
1788 slave_dev->needed_headroom = cpu_dp->tag_ops->overhead;
1789 /* Try to save one extra realloc later in the TX path (in the master)
1790 * by also inheriting the master's needed headroom and tailroom.
1791 * The 8021q driver also does this.
1793 slave_dev->needed_headroom += master->needed_headroom;
1794 slave_dev->needed_tailroom += master->needed_tailroom;
1795 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1797 netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1800 SET_NETDEV_DEV(slave_dev, port->ds->dev);
1801 slave_dev->dev.of_node = port->dn;
1802 slave_dev->vlan_features = master->vlan_features;
1804 p = netdev_priv(slave_dev);
1805 slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1806 if (!slave_dev->tstats) {
1807 free_netdev(slave_dev);
1811 ret = gro_cells_init(&p->gcells, slave_dev);
1816 INIT_LIST_HEAD(&p->mall_tc_list);
1817 p->xmit = cpu_dp->tag_ops->xmit;
1818 port->slave = slave_dev;
1821 ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
1823 if (ret && ret != -EOPNOTSUPP)
1824 dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
1825 ret, ETH_DATA_LEN, port->index);
1827 netif_carrier_off(slave_dev);
1829 ret = dsa_slave_phy_setup(slave_dev);
1831 netdev_err(slave_dev,
1832 "error %d setting up PHY for tree %d, switch %d, port %d\n",
1833 ret, ds->dst->index, ds->index, port->index);
1839 ret = register_netdevice(slave_dev);
1841 netdev_err(master, "error %d registering interface %s\n",
1842 ret, slave_dev->name);
1847 ret = netdev_upper_dev_link(master, slave_dev, NULL);
1852 goto out_unregister;
1857 unregister_netdev(slave_dev);
1860 phylink_disconnect_phy(p->dp->pl);
1862 phylink_destroy(p->dp->pl);
1864 gro_cells_destroy(&p->gcells);
1866 free_percpu(slave_dev->tstats);
1867 free_netdev(slave_dev);
1872 void dsa_slave_destroy(struct net_device *slave_dev)
1874 struct net_device *master = dsa_slave_to_master(slave_dev);
1875 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1876 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1878 netif_carrier_off(slave_dev);
1880 netdev_upper_dev_unlink(master, slave_dev);
1881 unregister_netdevice(slave_dev);
1882 phylink_disconnect_phy(dp->pl);
1885 phylink_destroy(dp->pl);
1886 gro_cells_destroy(&p->gcells);
1887 free_percpu(slave_dev->tstats);
1888 free_netdev(slave_dev);
1891 bool dsa_slave_dev_check(const struct net_device *dev)
1893 return dev->netdev_ops == &dsa_slave_netdev_ops;
1895 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
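/* CHANGEUPPER handling: enslaving a slave to a bridge triggers
 * dsa_port_bridge_join() and bridge MTU normalization, enslaving it to a
 * LAG triggers dsa_port_lag_join(); leaving does the reverse. An
 * -EOPNOTSUPP from the LAG join is only reported through extack, so the
 * bond/team can still be set up in software.
 */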
1897 static int dsa_slave_changeupper(struct net_device *dev,
1898 struct netdev_notifier_changeupper_info *info)
1900 struct dsa_port *dp = dsa_slave_to_port(dev);
1901 int err = NOTIFY_DONE;
1903 if (netif_is_bridge_master(info->upper_dev)) {
1904 if (info->linking) {
1905 err = dsa_port_bridge_join(dp, info->upper_dev);
1907 dsa_bridge_mtu_normalization(dp);
1908 err = notifier_from_errno(err);
1910 dsa_port_bridge_leave(dp, info->upper_dev);
1913 } else if (netif_is_lag_master(info->upper_dev)) {
1914 if (info->linking) {
1915 err = dsa_port_lag_join(dp, info->upper_dev,
1917 if (err == -EOPNOTSUPP) {
1918 NL_SET_ERR_MSG_MOD(info->info.extack,
1919 "Offloading not supported");
1922 err = notifier_from_errno(err);
1924 dsa_port_lag_leave(dp, info->upper_dev);
1933 dsa_slave_lag_changeupper(struct net_device *dev,
1934 struct netdev_notifier_changeupper_info *info)
1936 struct net_device *lower;
1937 struct list_head *iter;
1938 int err = NOTIFY_DONE;
1939 struct dsa_port *dp;
1941 netdev_for_each_lower_dev(dev, lower, iter) {
1942 if (!dsa_slave_dev_check(lower))
1945 dp = dsa_slave_to_port(lower);
1950 err = dsa_slave_changeupper(lower, info);
1951 if (notifier_to_errno(err))
1959 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
1960 struct netdev_notifier_changeupper_info *info)
1962 struct netlink_ext_ack *ext_ack;
1963 struct net_device *slave;
1964 struct dsa_port *dp;
1966 ext_ack = netdev_notifier_info_to_extack(&info->info);
1968 if (!is_vlan_dev(dev))
1971 slave = vlan_dev_real_dev(dev);
1972 if (!dsa_slave_dev_check(slave))
1975 dp = dsa_slave_to_port(slave);
1976 if (!dp->bridge_dev)
1979 /* Deny enslaving a VLAN device into a VLAN-aware bridge */
1980 if (br_vlan_enabled(dp->bridge_dev) &&
1981 netif_is_bridge_master(info->upper_dev) && info->linking) {
1982 NL_SET_ERR_MSG_MOD(ext_ack,
1983 "Cannot enslave VLAN device into VLAN aware bridge");
1984 return notifier_from_errno(-EINVAL);
1991 dsa_slave_check_8021q_upper(struct net_device *dev,
1992 struct netdev_notifier_changeupper_info *info)
1994 struct dsa_port *dp = dsa_slave_to_port(dev);
1995 struct net_device *br = dp->bridge_dev;
1996 struct bridge_vlan_info br_info;
1997 struct netlink_ext_ack *extack;
1998 int err = NOTIFY_DONE;
2001 if (!br || !br_vlan_enabled(br))
2004 extack = netdev_notifier_info_to_extack(&info->info);
2005 vid = vlan_dev_vlan_id(info->upper_dev);
2007 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
2008 * device, respectively the VID, is not found. A return of 0 means the
2009 * VLAN exists on the bridge, which is a failure for us here.
2011 err = br_vlan_get_info(br, vid, &br_info);
2013 NL_SET_ERR_MSG_MOD(extack,
2014 "This VLAN is already configured by the bridge");
2015 return notifier_from_errno(-EBUSY);
2021 static int dsa_slave_netdevice_event(struct notifier_block *nb,
2022 unsigned long event, void *ptr)
2024 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2027 case NETDEV_PRECHANGEUPPER: {
2028 struct netdev_notifier_changeupper_info *info = ptr;
2029 struct dsa_switch *ds;
2030 struct dsa_port *dp;
2033 if (!dsa_slave_dev_check(dev))
2034 return dsa_prevent_bridging_8021q_upper(dev, ptr);
2036 dp = dsa_slave_to_port(dev);
2039 if (ds->ops->port_prechangeupper) {
2040 err = ds->ops->port_prechangeupper(ds, dp->index, info);
2042 return notifier_from_errno(err);
2045 if (is_vlan_dev(info->upper_dev))
2046 return dsa_slave_check_8021q_upper(dev, ptr);
2049 case NETDEV_CHANGEUPPER:
2050 if (dsa_slave_dev_check(dev))
2051 return dsa_slave_changeupper(dev, ptr);
2053 if (netif_is_lag_master(dev))
2054 return dsa_slave_lag_changeupper(dev, ptr);
2057 case NETDEV_CHANGELOWERSTATE: {
2058 struct netdev_notifier_changelowerstate_info *info = ptr;
2059 struct dsa_port *dp;
2062 if (!dsa_slave_dev_check(dev))
2065 dp = dsa_slave_to_port(dev);
2067 err = dsa_port_lag_change(dp, info->lower_state_info);
2068 return notifier_from_errno(err);
2076 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2078 struct dsa_switch *ds = switchdev_work->ds;
2079 struct switchdev_notifier_fdb_info info;
2080 struct dsa_port *dp;
2082 if (!dsa_is_user_port(ds, switchdev_work->port))
2085 info.addr = switchdev_work->addr;
2086 info.vid = switchdev_work->vid;
2087 info.offloaded = true;
2088 dp = dsa_to_port(ds, switchdev_work->port);
2089 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2090 dp->slave, &info.info, NULL);
2093 static void dsa_slave_switchdev_event_work(struct work_struct *work)
2095 struct dsa_switchdev_event_work *switchdev_work =
2096 container_of(work, struct dsa_switchdev_event_work, work);
2097 struct dsa_switch *ds = switchdev_work->ds;
2098 struct dsa_port *dp;
2101 dp = dsa_to_port(ds, switchdev_work->port);
2104 switch (switchdev_work->event) {
2105 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2106 err = dsa_port_fdb_add(dp, switchdev_work->addr,
2107 switchdev_work->vid);
2110 "port %d failed to add %pM vid %d to fdb: %d\n",
2111 dp->index, switchdev_work->addr,
2112 switchdev_work->vid, err);
2115 dsa_fdb_offload_notify(switchdev_work);
2118 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2119 err = dsa_port_fdb_del(dp, switchdev_work->addr,
2120 switchdev_work->vid);
2123 "port %d failed to delete %pM vid %d from fdb: %d\n",
2124 dp->index, switchdev_work->addr,
2125 switchdev_work->vid, err);
2132 kfree(switchdev_work);
2133 if (dsa_is_user_port(ds, dp->index))
2137 static int dsa_lower_dev_walk(struct net_device *lower_dev,
2138 struct netdev_nested_priv *priv)
2140 if (dsa_slave_dev_check(lower_dev)) {
2141 priv->data = (void *)netdev_priv(lower_dev);
2148 static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
2150 struct netdev_nested_priv priv = {
2154 netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);
2156 return (struct dsa_slave_priv *)priv.data;
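/* SWITCHDEV_FDB_ADD_TO_DEVICE/_DEL_TO_DEVICE are deferred to a
 * dsa_switchdev_event_work item: addresses added by user space on the
 * slave itself and, for switches with assisted_learning_on_cpu_port,
 * addresses learnt on foreign interfaces bridged with a slave are
 * programmed into the hardware FDB from process context.
 */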
2159 /* Called under rcu_read_lock() */
2160 static int dsa_slave_switchdev_event(struct notifier_block *unused,
2161 unsigned long event, void *ptr)
2163 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2164 const struct switchdev_notifier_fdb_info *fdb_info;
2165 struct dsa_switchdev_event_work *switchdev_work;
2166 struct dsa_port *dp;
2170 case SWITCHDEV_PORT_ATTR_SET:
2171 err = switchdev_handle_port_attr_set(dev, ptr,
2172 dsa_slave_dev_check,
2173 dsa_slave_port_attr_set);
2174 return notifier_from_errno(err);
2175 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2176 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2179 if (dsa_slave_dev_check(dev)) {
2180 if (!fdb_info->added_by_user)
2183 dp = dsa_slave_to_port(dev);
2185 /* Snoop addresses learnt on foreign interfaces
2186 * bridged with us, for switches that don't
2187 * automatically learn SA from CPU-injected traffic
2189 struct net_device *br_dev;
2190 struct dsa_slave_priv *p;
2192 br_dev = netdev_master_upper_dev_get_rcu(dev);
2196 if (!netif_is_bridge_master(br_dev))
2199 p = dsa_slave_dev_lower_find(br_dev);
2205 if (!dp->ds->assisted_learning_on_cpu_port)
2209 if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
2212 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2213 if (!switchdev_work)
2216 INIT_WORK(&switchdev_work->work,
2217 dsa_slave_switchdev_event_work);
2218 switchdev_work->ds = dp->ds;
2219 switchdev_work->port = dp->index;
2220 switchdev_work->event = event;
2222 ether_addr_copy(switchdev_work->addr,
2224 switchdev_work->vid = fdb_info->vid;
2226 /* Hold a reference on the slave for dsa_fdb_offload_notify */
2227 if (dsa_is_user_port(dp->ds, dp->index))
2229 dsa_schedule_work(&switchdev_work->work);
2238 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2239 unsigned long event, void *ptr)
2241 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2245 case SWITCHDEV_PORT_OBJ_ADD:
2246 err = switchdev_handle_port_obj_add(dev, ptr,
2247 dsa_slave_dev_check,
2248 dsa_slave_port_obj_add);
2249 return notifier_from_errno(err);
2250 case SWITCHDEV_PORT_OBJ_DEL:
2251 err = switchdev_handle_port_obj_del(dev, ptr,
2252 dsa_slave_dev_check,
2253 dsa_slave_port_obj_del);
2254 return notifier_from_errno(err);
2255 case SWITCHDEV_PORT_ATTR_SET:
2256 err = switchdev_handle_port_attr_set(dev, ptr,
2257 dsa_slave_dev_check,
2258 dsa_slave_port_attr_set);
2259 return notifier_from_errno(err);
2265 static struct notifier_block dsa_slave_nb __read_mostly = {
2266 .notifier_call = dsa_slave_netdevice_event,
2269 static struct notifier_block dsa_slave_switchdev_notifier = {
2270 .notifier_call = dsa_slave_switchdev_event,
2273 static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
2274 .notifier_call = dsa_slave_switchdev_blocking_event,
2277 int dsa_slave_register_notifier(void)
2279 struct notifier_block *nb;
2282 err = register_netdevice_notifier(&dsa_slave_nb);
2286 err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2288 goto err_switchdev_nb;
2290 nb = &dsa_slave_switchdev_blocking_notifier;
2291 err = register_switchdev_blocking_notifier(nb);
2293 goto err_switchdev_blocking_nb;
2297 err_switchdev_blocking_nb:
2298 unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2300 unregister_netdevice_notifier(&dsa_slave_nb);
2304 void dsa_slave_unregister_notifier(void)
2306 struct notifier_block *nb;
2309 nb = &dsa_slave_switchdev_blocking_notifier;
2310 err = unregister_switchdev_blocking_notifier(nb);
2312 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2314 err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2316 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2318 err = unregister_netdevice_notifier(&dsa_slave_nb);
2320 pr_err("DSA: failed to unregister slave notifier (%d)\n", err);