1 // SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */
10 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/msi.h>
14 #include <linux/kthread.h>
15 #include <linux/workqueue.h>
17 #include <linux/fsl/mc.h>
21 /* Minimal supported DPSW version */
22 #define DPSW_MIN_VER_MAJOR 8
23 #define DPSW_MIN_VER_MINOR 1
25 #define DEFAULT_VLAN_ID 1
/* Create VLAN @vid on the DPSW object via firmware (dpsw_vlan_add) and
 * record switch-level membership in ethsw->vlans[].
 * NOTE(review): interior lines of this function are missing from this excerpt.
 */
27 static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
31 	struct dpsw_vlan_cfg	vcfg = {
35 	err = dpsw_vlan_add(ethsw->mc_io, 0,
36 			    ethsw->dpsw_handle, vid, &vcfg);
38 		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
41 	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
/* Change the port's PVID: read current TCI, take the interface down if it is
 * operationally up (firmware requires the port disabled to change the PVID),
 * program the new VLAN id, update the per-port PVID bookkeeping and finally
 * re-enable the interface.
 */
46 static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
48 	struct ethsw_core *ethsw = port_priv->ethsw_data;
49 	struct net_device *netdev = port_priv->netdev;
50 	struct dpsw_tci_cfg tci_cfg = { 0 };
54 	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
55 			      port_priv->idx, &tci_cfg);
57 		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
61 	tci_cfg.vlan_id = pvid;
63 	/* Interface needs to be down to change PVID */
64 	is_oper = netif_oper_up(netdev);
66 		err = dpsw_if_disable(ethsw->mc_io, 0,
70 			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
75 	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
76 			      port_priv->idx, &tci_cfg);
78 		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
82 	/* Delete previous PVID info and mark the new one */
83 	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
84 	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
85 	port_priv->pvid = pvid;
/* Re-enable the interface only if it was up on entry (is_oper). */
89 		ret = dpsw_if_enable(ethsw->mc_io, 0,
93 			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
/* Add this port to VLAN @vid.  Rejects duplicates, joins the VLAN via
 * dpsw_vlan_add_if(), then honours the bridge VLAN flags: UNTAGGED adds the
 * port to the VLAN's untagged set, PVID reprograms the port PVID via
 * ethsw_port_set_pvid().  Per-port state is tracked in port_priv->vlans[].
 */
101 static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
104 	struct ethsw_core *ethsw = port_priv->ethsw_data;
105 	struct net_device *netdev = port_priv->netdev;
106 	struct dpsw_vlan_if_cfg vcfg;
109 	if (port_priv->vlans[vid]) {
110 		netdev_warn(netdev, "VLAN %d already configured\n", vid);
115 	vcfg.if_id[0] = port_priv->idx;
116 	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
118 		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
122 	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
124 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
125 		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
130 				   "dpsw_vlan_add_if_untagged err %d\n", err);
133 		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
136 	if (flags & BRIDGE_VLAN_INFO_PVID) {
137 		err = ethsw_port_set_pvid(port_priv, vid);
/* Switch-wide FDB learning control: map @enable to HW/disabled learning mode
 * and program it via dpsw_fdb_set_learning_mode() (FDB id 0).  Caches the
 * setting in ethsw->learning on success.
 */
145 static int ethsw_set_learning(struct ethsw_core *ethsw, bool enable)
147 	enum dpsw_fdb_learning_mode learn_mode;
151 		learn_mode = DPSW_FDB_LEARNING_MODE_HW;
153 		learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
155 	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
158 		dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
161 	ethsw->learning = enable;
/* Per-port flooding control: program firmware via dpsw_if_set_flooding() and
 * cache the setting in port_priv->flood on success.
 */
166 static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
170 	err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
171 				   port_priv->ethsw_data->dpsw_handle,
172 				   port_priv->idx, enable);
174 		netdev_err(port_priv->netdev,
175 			   "dpsw_if_set_flooding err %d\n", err);
178 	port_priv->flood = enable;
/* Set the port's STP state (for DEFAULT_VLAN_ID).  Skips the firmware call
 * when the port is not operationally up or the state is unchanged, then
 * caches the new state in port_priv->stp_state.
 */
183 static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
185 	struct dpsw_stp_cfg stp_cfg = {
186 		.vlan_id = DEFAULT_VLAN_ID,
191 	if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
192 		return 0;	/* Nothing to do */
194 	err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
195 			      port_priv->ethsw_data->dpsw_handle,
196 			      port_priv->idx, &stp_cfg);
198 		netdev_err(port_priv->netdev,
199 			   "dpsw_if_set_stp err %d\n", err);
203 	port_priv->stp_state = state;
/* Remove VLAN @vid from the DPSW object (dpsw_vlan_remove) once no longer
 * needed, then clear the switch-level and every port-level membership entry
 * for that vid.  No-op if the switch does not know the VLAN.
 */
208 static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
210 	struct ethsw_port_priv *ppriv_local = NULL;
213 	if (!ethsw->vlans[vid])
216 	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
218 		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
221 	ethsw->vlans[vid] = 0;
223 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
224 		ppriv_local = ethsw->ports[i];
225 		ppriv_local->vlans[vid] = 0;
/* Install a static unicast FDB entry for @addr with this port as the egress
 * interface (dpsw_fdb_add_unicast).
 */
231 static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
232 				 const unsigned char *addr)
234 	struct dpsw_fdb_unicast_cfg entry = {0};
237 	entry.if_egress = port_priv->idx;
238 	entry.type = DPSW_FDB_ENTRY_STATIC;
239 	ether_addr_copy(entry.mac_addr, addr);
241 	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
242 				   port_priv->ethsw_data->dpsw_handle,
245 		netdev_err(port_priv->netdev,
246 			   "dpsw_fdb_add_unicast err %d\n", err);
/* Remove the static unicast FDB entry for @addr on this port.  -ENXIO from
 * firmware (entry already gone, e.g. repeated delete) is deliberately not
 * reported.
 */
250 static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
251 				 const unsigned char *addr)
253 	struct dpsw_fdb_unicast_cfg entry = {0};
256 	entry.if_egress = port_priv->idx;
257 	entry.type = DPSW_FDB_ENTRY_STATIC;
258 	ether_addr_copy(entry.mac_addr, addr);
260 	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
261 				      port_priv->ethsw_data->dpsw_handle,
263 	/* Silently discard error for calling multiple times the del command */
264 	if (err && err != -ENXIO)
265 		netdev_err(port_priv->netdev,
266 			   "dpsw_fdb_remove_unicast err %d\n", err);
/* Install a static multicast FDB entry for @addr with this port in the
 * egress interface list.  -ENXIO (entry already present on repeated add)
 * is deliberately not reported.
 */
270 static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
271 				 const unsigned char *addr)
273 	struct dpsw_fdb_multicast_cfg entry = {0};
276 	ether_addr_copy(entry.mac_addr, addr);
277 	entry.type = DPSW_FDB_ENTRY_STATIC;
279 	entry.if_id[0] = port_priv->idx;
281 	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
282 				     port_priv->ethsw_data->dpsw_handle,
284 	/* Silently discard error for calling multiple times the add command */
285 	if (err && err != -ENXIO)
286 		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
/* Remove this port from the static multicast FDB entry for @addr.
 * -ENAVAIL (entry already gone on repeated delete) is deliberately not
 * reported.
 */
291 static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
292 				 const unsigned char *addr)
294 	struct dpsw_fdb_multicast_cfg entry = {0};
297 	ether_addr_copy(entry.mac_addr, addr);
298 	entry.type = DPSW_FDB_ENTRY_STATIC;
300 	entry.if_id[0] = port_priv->idx;
302 	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
303 					port_priv->ethsw_data->dpsw_handle,
305 	/* Silently discard error for calling multiple times the del command */
306 	if (err && err != -ENAVAIL)
307 		netdev_err(port_priv->netdev,
308 			   "dpsw_fdb_remove_multicast err %d\n", err);
/* .ndo_fdb_add callback: dispatch to the unicast or multicast static-entry
 * helper based on the MAC address class.
 */
312 static int port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
313 			struct net_device *dev, const unsigned char *addr,
315 			struct netlink_ext_ack *extack)
317 	if (is_unicast_ether_addr(addr))
318 		return ethsw_port_fdb_add_uc(netdev_priv(dev),
321 		return ethsw_port_fdb_add_mc(netdev_priv(dev),
/* .ndo_fdb_del callback: counterpart of port_fdb_add(), dispatching on the
 * MAC address class.
 */
325 static int port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
326 			struct net_device *dev,
327 			const unsigned char *addr, u16 vid)
329 	if (is_unicast_ether_addr(addr))
330 		return ethsw_port_fdb_del_uc(netdev_priv(dev),
333 		return ethsw_port_fdb_del_mc(netdev_priv(dev),
/* .ndo_get_stats64 callback: gather per-port counters from firmware via a
 * sequence of dpsw_if_get_counter() calls.  rx_dropped accumulates both
 * ingress-discard and ingress-filter counts; any firmware error falls
 * through to a single error print at the end.
 */
337 static void port_get_stats(struct net_device *netdev,
338 			   struct rtnl_link_stats64 *stats)
340 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
344 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
345 				  port_priv->ethsw_data->dpsw_handle,
347 				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
351 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
352 				  port_priv->ethsw_data->dpsw_handle,
354 				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
358 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
359 				  port_priv->ethsw_data->dpsw_handle,
361 				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
365 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
366 				  port_priv->ethsw_data->dpsw_handle,
368 				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
372 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
373 				  port_priv->ethsw_data->dpsw_handle,
375 				  DPSW_CNT_ING_FRAME_DISCARD,
380 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
381 				  port_priv->ethsw_data->dpsw_handle,
383 				  DPSW_CNT_ING_FLTR_FRAME,
387 	stats->rx_dropped += tmp;
389 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
390 				  port_priv->ethsw_data->dpsw_handle,
392 				  DPSW_CNT_EGR_FRAME_DISCARD,
400 	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
/* .ndo_has_offload_stats: only IFLA_OFFLOAD_XSTATS_CPU_HIT is supported. */
403 static bool port_has_offload_stats(const struct net_device *netdev,
406 	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
/* .ndo_get_offload_stats: for CPU_HIT, reuse port_get_stats() to fill the
 * caller-provided stats buffer @sp.
 */
409 static int port_get_offload_stats(int attr_id,
410 				  const struct net_device *netdev,
414 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
415 		port_get_stats((struct net_device *)netdev, sp);
/* .ndo_change_mtu: translate the L3 MTU to a max frame length via
 * ETHSW_L2_MAX_FRM() and program it in firmware.
 */
422 static int port_change_mtu(struct net_device *netdev, int mtu)
424 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
427 	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
429 					   port_priv->ethsw_data->dpsw_handle,
431 					   (u16)ETHSW_L2_MAX_FRM(mtu));
434 			   "dpsw_if_set_max_frame_length() err %d\n", err);
/* Query the firmware link state for this port and propagate any change to
 * the netdev carrier (netif_carrier_on/off), caching it in
 * port_priv->link_state.  WARNs once if firmware returns a value other than
 * 0/1 in state.up.
 */
442 static int port_carrier_state_sync(struct net_device *netdev)
444 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
445 	struct dpsw_link_state state;
448 	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
449 				     port_priv->ethsw_data->dpsw_handle,
450 				     port_priv->idx, &state);
452 		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
456 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
458 	if (state.up != port_priv->link_state) {
460 			netif_carrier_on(netdev);
462 			netif_carrier_off(netdev);
463 		port_priv->link_state = state.up;
/* .ndo_open: keep Tx queues stopped (no control-interface I/O support),
 * enable the port in firmware, then sync the carrier state; on carrier-sync
 * failure the port is disabled again via the error path.
 */
468 static int port_open(struct net_device *netdev)
470 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
473 	/* No need to allow Tx as control interface is disabled */
474 	netif_tx_stop_all_queues(netdev);
476 	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
477 			     port_priv->ethsw_data->dpsw_handle,
480 		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
484 	/* sync carrier state */
485 	err = port_carrier_state_sync(netdev);
488 			   "port_carrier_state_sync err %d\n", err);
489 		goto err_carrier_sync;
/* Error unwind: undo the dpsw_if_enable() above. */
495 	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
496 			port_priv->ethsw_data->dpsw_handle,
/* .ndo_stop: disable the port in firmware. */
501 static int port_stop(struct net_device *netdev)
503 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
506 	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
507 			      port_priv->ethsw_data->dpsw_handle,
510 		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
/* .ndo_start_xmit stub: the driver does no frame I/O, so every skb is
 * dropped.
 */
517 static netdev_tx_t port_dropframe(struct sk_buff *skb,
518 				  struct net_device *netdev)
520 	/* we don't support I/O for now, drop the frame */
521 	dev_kfree_skb_any(skb);
/* .ndo_get_port_parent_id: identify the parent switch by the DPSW object id
 * so all ports of one DPSW report the same parent.
 */
526 static int swdev_get_port_parent_id(struct net_device *dev,
527 				    struct netdev_phys_item_id *ppid)
529 	struct ethsw_port_priv *port_priv = netdev_priv(dev);
532 	ppid->id[0] = port_priv->ethsw_data->dev_id;
/* .ndo_get_phys_port_name: report the port as "p<idx>". */
537 static int port_get_phys_name(struct net_device *netdev, char *name,
540 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
543 	err = snprintf(name, len, "p%d", port_priv->idx);
/* Context threaded through the FDB dump: target netdev, destination skb
 * (not visible in this excerpt), netlink callback state and running index.
 */
550 struct ethsw_dump_ctx {
551 	struct net_device *dev;
553 	struct netlink_callback *cb;
/* Emit one firmware FDB entry as an RTM_NEWNEIGH netlink message into the
 * dump skb.  Entries before cb->args[2] are skipped (dump resume point);
 * dynamic entries report NUD_REACHABLE, static ones NUD_NOARP.  On nlmsg
 * failure the partially built message is cancelled.
 */
557 static int ethsw_fdb_do_dump(struct fdb_dump_entry *entry,
558 			     struct ethsw_dump_ctx *dump)
560 	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
561 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
562 	u32 seq = dump->cb->nlh->nlmsg_seq;
563 	struct nlmsghdr *nlh;
566 	if (dump->idx < dump->cb->args[2])
569 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
570 			sizeof(*ndm), NLM_F_MULTI);
574 	ndm = nlmsg_data(nlh);
575 	ndm->ndm_family  = AF_BRIDGE;
578 	ndm->ndm_flags   = NTF_SELF;
580 	ndm->ndm_ifindex = dump->dev->ifindex;
581 	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
583 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
584 		goto nla_put_failure;
586 	nlmsg_end(dump->skb, nlh);
593 	nlmsg_cancel(dump->skb, nlh);
/* Does this firmware FDB dump entry belong to @port_priv?  Unicast entries
 * match on if_info; multicast entries test the port's bit in the if_mask
 * bitmap.
 */
597 static int port_fdb_valid_entry(struct fdb_dump_entry *entry,
598 				struct ethsw_port_priv *port_priv)
600 	int idx = port_priv->idx;
603 	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
604 		valid = entry->if_info == port_priv->idx;
606 		valid = entry->if_mask[idx / 8] & BIT(idx % 8);
/* .ndo_fdb_dump: allocate a DMA-able buffer sized for max_fdb_entries, have
 * firmware dump the FDB into it (dpsw_fdb_dump), unmap, then walk the
 * entries, filtering to this port (port_fdb_valid_entry) and emitting each
 * via ethsw_fdb_do_dump().
 * NOTE(review): the final dma_unmap_single() uses DMA_TO_DEVICE while the
 * map above uses DMA_FROM_DEVICE — direction mismatch on an error path;
 * confirm against the full source.
 */
611 static int port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
612 			 struct net_device *net_dev,
613 			 struct net_device *filter_dev, int *idx)
615 	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
616 	struct ethsw_core *ethsw = port_priv->ethsw_data;
617 	struct device *dev = net_dev->dev.parent;
618 	struct fdb_dump_entry *fdb_entries;
619 	struct fdb_dump_entry fdb_entry;
620 	struct ethsw_dump_ctx dump = {
626 	dma_addr_t fdb_dump_iova;
632 	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
633 	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
637 	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
639 	if (dma_mapping_error(dev, fdb_dump_iova)) {
640 		netdev_err(net_dev, "dma_map_single() failed\n");
645 	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
646 			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
648 		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
652 	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
654 	fdb_entries = (struct fdb_dump_entry *)dma_mem;
655 	for (i = 0; i < num_fdb_entries; i++) {
656 		fdb_entry = fdb_entries[i];
658 		if (!port_fdb_valid_entry(&fdb_entry, port_priv))
661 		err = ethsw_fdb_do_dump(&fdb_entry, &dump);
674 	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE);
/* net_device_ops for a switch port.  Tx is a drop stub (no data path);
 * everything else is offloaded to DPSW firmware through the helpers above.
 */
680 static const struct net_device_ops ethsw_port_ops = {
681 	.ndo_open		= port_open,
682 	.ndo_stop		= port_stop,
684 	.ndo_set_mac_address	= eth_mac_addr,
685 	.ndo_get_stats64	= port_get_stats,
686 	.ndo_change_mtu		= port_change_mtu,
687 	.ndo_has_offload_stats	= port_has_offload_stats,
688 	.ndo_get_offload_stats	= port_get_offload_stats,
689 	.ndo_fdb_add		= port_fdb_add,
690 	.ndo_fdb_del		= port_fdb_del,
691 	.ndo_fdb_dump		= port_fdb_dump,
693 	.ndo_start_xmit		= port_dropframe,
694 	.ndo_get_port_parent_id	= swdev_get_port_parent_id,
695 	.ndo_get_phys_port_name = port_get_phys_name,
/* Re-sync carrier state on every port of the switch. */
698 static void ethsw_links_state_update(struct ethsw_core *ethsw)
702 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
703 		port_carrier_state_sync(ethsw->ports[i]->netdev);
/* Threaded IRQ handler for the DPSW interface interrupt: read the status,
 * clear all pending bits, and on LINK_CHANGED re-sync carrier state across
 * all ports.
 */
706 static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
708 	struct device *dev = (struct device *)arg;
709 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
711 	/* Mask the events and the if_id reserved bits to be cleared on read */
712 	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
715 	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
716 				  DPSW_IRQ_INDEX_IF, &status);
718 		dev_err(dev, "Can't get irq status (err %d)\n", err);
720 		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
721 					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
723 			dev_err(dev, "Can't clear irq status (err %d)\n", err);
727 	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
728 		ethsw_links_state_update(ethsw);
/* Allocate and wire up the MC interrupt for the DPSW object: allocate MC
 * irqs, sanity-check the count, disable the irq while configuring, request
 * the threaded handler, set the LINK_CHANGED mask and finally re-enable.
 * Error paths unwind the irq request and the MC irq allocation.
 */
734 static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
736 	struct device *dev = &sw_dev->dev;
737 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
738 	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
739 	struct fsl_mc_device_irq *irq;
742 	err = fsl_mc_allocate_irqs(sw_dev);
744 		dev_err(dev, "MC irqs allocation failed\n");
748 	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
753 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
754 				  DPSW_IRQ_INDEX_IF, 0);
756 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
760 	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
762 	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
764 					ethsw_irq0_handler_thread,
765 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
768 		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
772 	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
773 				DPSW_IRQ_INDEX_IF, mask);
775 		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
779 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
780 				  DPSW_IRQ_INDEX_IF, 1);
782 		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
/* Error unwind labels follow: free the requested irq, then the MC irqs. */
789 	devm_free_irq(dev, irq->msi_desc->irq, dev);
791 	fsl_mc_free_irqs(sw_dev);
/* Disable the DPSW interface interrupt and free the MC irqs (counterpart of
 * ethsw_setup_irqs()).
 */
795 static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
797 	struct device *dev = &sw_dev->dev;
798 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
801 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
802 				  DPSW_IRQ_INDEX_IF, 0);
804 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
806 	fsl_mc_free_irqs(sw_dev);
/* switchdev STP attribute handler: nothing to do in the prepare phase,
 * program the state in the commit phase.
 */
809 static int port_attr_stp_state_set(struct net_device *netdev,
810 				   struct switchdev_trans *trans,
813 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
815 	if (switchdev_trans_ph_prepare(trans))
818 	return ethsw_port_set_stp_state(port_priv, state);
/* Pre-check for bridge port flags: only BR_LEARNING and BR_FLOOD are
 * offloadable; any other flag is rejected.
 */
821 static int port_attr_br_flags_pre_set(struct net_device *netdev,
822 				      struct switchdev_trans *trans,
825 	if (flags & ~(BR_LEARNING | BR_FLOOD))
/* Commit bridge port flags: BR_LEARNING maps to the switch-wide learning
 * mode (learning is a per-switch setting in DPSW), BR_FLOOD to per-port
 * flooding.
 */
831 static int port_attr_br_flags_set(struct net_device *netdev,
832 				  struct switchdev_trans *trans,
835 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
838 	if (switchdev_trans_ph_prepare(trans))
841 	/* Learning is enabled per switch */
842 	err = ethsw_set_learning(port_priv->ethsw_data,
843 				 !!(flags & BR_LEARNING));
847 	err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
/* switchdev attribute dispatcher: STP state, bridge flags (pre/commit) and
 * VLAN filtering (accepted silently — VLANs are always on).
 */
853 static int swdev_port_attr_set(struct net_device *netdev,
854 			       const struct switchdev_attr *attr,
855 			       struct switchdev_trans *trans)
860 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
861 		err = port_attr_stp_state_set(netdev, trans,
864 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
865 		err = port_attr_br_flags_pre_set(netdev, trans,
866 						 attr->u.brport_flags);
868 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
869 		err = port_attr_br_flags_set(netdev, trans,
870 					     attr->u.brport_flags);
872 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
873 		/* VLANs are supported by default */
/* switchdev VLAN-object add (commit phase): for each vid in the range,
 * create the VLAN on the switch if it is new (marking it GLOBAL), then join
 * this port with the requested bridge flags.
 */
883 static int port_vlans_add(struct net_device *netdev,
884 			  const struct switchdev_obj_port_vlan *vlan,
885 			  struct switchdev_trans *trans)
887 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
890 	if (switchdev_trans_ph_prepare(trans))
893 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
894 		if (!port_priv->ethsw_data->vlans[vid]) {
895 			/* this is a new VLAN */
896 			err = ethsw_add_vlan(port_priv->ethsw_data, vid);
900 			port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
902 		err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
/* Search the netdev's uc (@is_uc true) or mc address list for @addr under
 * the addr-list lock.  Returns non-zero when found (exact return values are
 * outside this excerpt).
 */
910 static int port_lookup_address(struct net_device *netdev, int is_uc,
911 			       const unsigned char *addr)
913 	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
914 	struct netdev_hw_addr *ha;
916 	netif_addr_lock_bh(netdev);
917 	list_for_each_entry(ha, &list->list, list) {
918 		if (ether_addr_equal(ha->addr, addr)) {
919 			netif_addr_unlock_bh(netdev);
923 	netif_addr_unlock_bh(netdev);
/* switchdev MDB add (commit phase): skip if the address is already on the
 * port's mc list, otherwise install the multicast FDB entry and mirror it
 * into the netdev mc list; roll the FDB entry back if dev_mc_add() fails.
 */
927 static int port_mdb_add(struct net_device *netdev,
928 			const struct switchdev_obj_port_mdb *mdb,
929 			struct switchdev_trans *trans)
931 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
934 	if (switchdev_trans_ph_prepare(trans))
937 	/* Check if address is already set on this port */
938 	if (port_lookup_address(netdev, 0, mdb->addr))
941 	err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
945 	err = dev_mc_add(netdev, mdb->addr);
947 		netdev_err(netdev, "dev_mc_add err %d\n", err);
948 		ethsw_port_fdb_del_mc(port_priv, mdb->addr);
/* switchdev object-add dispatcher: VLAN and MDB objects are supported. */
954 static int swdev_port_obj_add(struct net_device *netdev,
955 			      const struct switchdev_obj *obj,
956 			      struct switchdev_trans *trans)
961 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
962 		err = port_vlans_add(netdev,
963 				     SWITCHDEV_OBJ_PORT_VLAN(obj),
966 	case SWITCHDEV_OBJ_ID_PORT_MDB:
967 		err = port_mdb_add(netdev,
968 				   SWITCHDEV_OBJ_PORT_MDB(obj),
/* Remove this port from VLAN @vid: clear the PVID if @vid is the port's
 * PVID, drop the untagged and member registrations in firmware, and if no
 * port of the switch is left a member, remove the VLAN from the switch
 * entirely via ethsw_dellink_switch().
 */
979 static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
981 	struct ethsw_core *ethsw = port_priv->ethsw_data;
982 	struct net_device *netdev = port_priv->netdev;
983 	struct dpsw_vlan_if_cfg vcfg;
986 	if (!port_priv->vlans[vid])
989 	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
990 		err = ethsw_port_set_pvid(port_priv, 0);
996 	vcfg.if_id[0] = port_priv->idx;
997 	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
998 		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
1003 				   "dpsw_vlan_remove_if_untagged err %d\n",
1006 		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
1009 	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
1010 		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1014 				   "dpsw_vlan_remove_if err %d\n", err);
1017 		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
1019 	/* Delete VLAN from switch if it is no longer configured on
1022 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
1023 		if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
1024 			return 0; /* Found a port member in VID */
1026 	ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
1028 	err = ethsw_dellink_switch(ethsw, vid);
/* switchdev VLAN-object delete: skip requests targeting the bridge master
 * itself, otherwise remove the port from each vid in the range.
 */
1036 static int port_vlans_del(struct net_device *netdev,
1037 			  const struct switchdev_obj_port_vlan *vlan)
1039 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1042 	if (netif_is_bridge_master(vlan->obj.orig_dev))
1045 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1046 		err = ethsw_port_del_vlan(port_priv, vid);
/* switchdev MDB delete: only act if the address is on the port's mc list;
 * remove the multicast FDB entry and the netdev mc-list mirror.
 */
1054 static int port_mdb_del(struct net_device *netdev,
1055 			const struct switchdev_obj_port_mdb *mdb)
1057 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1060 	if (!port_lookup_address(netdev, 0, mdb->addr))
1063 	err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
1067 	err = dev_mc_del(netdev, mdb->addr);
1069 		netdev_err(netdev, "dev_mc_del err %d\n", err);
/* switchdev object-delete dispatcher: counterpart of swdev_port_obj_add(). */
1076 static int swdev_port_obj_del(struct net_device *netdev,
1077 			      const struct switchdev_obj *obj)
1082 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1083 		err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
1085 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1086 		err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
/* Bridge between the switchdev notifier and swdev_port_attr_set(): forward
 * the attribute, mark the notification handled, and translate the errno for
 * the notifier chain.
 */
1096 ethsw_switchdev_port_attr_set_event(struct net_device *netdev,
1097 				    struct switchdev_notifier_port_attr_info
1102 	err = swdev_port_attr_set(netdev, port_attr_info->attr,
1103 				  port_attr_info->trans);
1105 	port_attr_info->handled = true;
1106 	return notifier_from_errno(err);
/* For the moment, only flood setting needs to be updated */
/* Join a bridge: refuse if another port of this DPSW is already enslaved to
 * a different bridge (one bridge per DPSW object), enable flooding, and
 * remember the upper device.
 */
1110 static int port_bridge_join(struct net_device *netdev,
1111 			    struct net_device *upper_dev)
1113 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1114 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1117 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
1118 		if (ethsw->ports[i]->bridge_dev &&
1119 		    (ethsw->ports[i]->bridge_dev != upper_dev)) {
1121 				   "Only one bridge supported per DPSW object!\n");
1125 	/* Enable flooding */
1126 	err = ethsw_port_set_flood(port_priv, 1);
1128 		port_priv->bridge_dev = upper_dev;
/* Leave the bridge: disable flooding and clear the cached upper device. */
1133 static int port_bridge_leave(struct net_device *netdev)
1135 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1138 	/* Disable flooding */
1139 	err = ethsw_port_set_flood(port_priv, 0);
1141 		port_priv->bridge_dev = NULL;
/* Identify our own port netdevs by their ops pointer (used to filter
 * global notifier events).
 */
1146 static bool ethsw_port_dev_check(const struct net_device *netdev)
1148 	return netdev->netdev_ops == &ethsw_port_ops;
/* netdevice notifier: on NETDEV_CHANGEUPPER for one of our ports, join or
 * leave the bridge depending on whether the upper device is a bridge master
 * (link vs unlink is decided by lines not visible in this excerpt).
 */
1151 static int port_netdevice_event(struct notifier_block *unused,
1152 				unsigned long event, void *ptr)
1154 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1155 	struct netdev_notifier_changeupper_info *info = ptr;
1156 	struct net_device *upper_dev;
1159 	if (!ethsw_port_dev_check(netdev))
1162 	/* Handle just upper dev link/unlink for the moment */
1163 	if (event == NETDEV_CHANGEUPPER) {
1164 		upper_dev = info->upper_dev;
1165 		if (netif_is_bridge_master(upper_dev)) {
1167 				err = port_bridge_join(netdev, upper_dev);
1169 				err = port_bridge_leave(netdev);
1173 	return notifier_from_errno(err);
/* Deferred-work container for switchdev FDB events: the work item, a copy
 * of the fdb_info (with its own copy of the MAC address), the originating
 * netdev and the event type.
 */
1176 struct ethsw_switchdev_event_work {
1177 	struct work_struct work;
1178 	struct switchdev_notifier_fdb_info fdb_info;
1179 	struct net_device *dev;
1180 	unsigned long event;
/* Workqueue handler for deferred FDB events: only user-added entries are
 * offloaded.  ADD installs a uc/mc FDB entry and reports
 * SWITCHDEV_FDB_OFFLOADED back; DEL removes it.  Always frees the copied
 * address and the work container on exit.
 */
1183 static void ethsw_switchdev_event_work(struct work_struct *work)
1185 	struct ethsw_switchdev_event_work *switchdev_work =
1186 		container_of(work, struct ethsw_switchdev_event_work, work);
1187 	struct net_device *dev = switchdev_work->dev;
1188 	struct switchdev_notifier_fdb_info *fdb_info;
1192 	fdb_info = &switchdev_work->fdb_info;
1194 	switch (switchdev_work->event) {
1195 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1196 		if (!fdb_info->added_by_user)
1198 		if (is_unicast_ether_addr(fdb_info->addr))
1199 			err = ethsw_port_fdb_add_uc(netdev_priv(dev),
1202 			err = ethsw_port_fdb_add_mc(netdev_priv(dev),
1206 		fdb_info->offloaded = true;
1207 		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
1208 					 &fdb_info->info, NULL);
1210 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1211 		if (!fdb_info->added_by_user)
1213 		if (is_unicast_ether_addr(fdb_info->addr))
1214 			ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
1216 			ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
1221 	kfree(switchdev_work->fdb_info.addr);
1222 	kfree(switchdev_work);
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier: ATTR_SET is handled inline; FDB add/del events
 * are copied (including a GFP_ATOMIC duplicate of the MAC address, since
 * the notifier payload is not valid after return) into a work item queued
 * on the driver's ordered workqueue.
 */
1227 static int port_switchdev_event(struct notifier_block *unused,
1228 				unsigned long event, void *ptr)
1230 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1231 	struct ethsw_port_priv *port_priv = netdev_priv(dev);
1232 	struct ethsw_switchdev_event_work *switchdev_work;
1233 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
1234 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1236 	if (!ethsw_port_dev_check(dev))
1239 	if (event == SWITCHDEV_PORT_ATTR_SET)
1240 		return ethsw_switchdev_port_attr_set_event(dev, ptr);
1242 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1243 	if (!switchdev_work)
1246 	INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
1247 	switchdev_work->dev = dev;
1248 	switchdev_work->event = event;
1251 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1252 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1253 		memcpy(&switchdev_work->fdb_info, ptr,
1254 		       sizeof(switchdev_work->fdb_info));
1255 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1256 		if (!switchdev_work->fdb_info.addr)
1257 			goto err_addr_alloc;
1259 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1262 		/* Take a reference on the device to avoid being freed. */
/* Default case (event not handled): free the work item and bail out. */
1266 		kfree(switchdev_work);
1270 	queue_work(ethsw->workqueue, &switchdev_work->work);
1275 	kfree(switchdev_work);
/* Bridge between the blocking switchdev notifier and the object add/del
 * dispatchers; marks the notification handled and translates errno for the
 * notifier chain.
 */
1280 ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
1281 			       struct switchdev_notifier_port_obj_info
1284 	int err = -EOPNOTSUPP;
1287 	case SWITCHDEV_PORT_OBJ_ADD:
1288 		err = swdev_port_obj_add(netdev, port_obj_info->obj,
1289 					 port_obj_info->trans);
1291 	case SWITCHDEV_PORT_OBJ_DEL:
1292 		err = swdev_port_obj_del(netdev, port_obj_info->obj);
1296 	port_obj_info->handled = true;
1297 	return notifier_from_errno(err);
/* Blocking switchdev notifier: object add/del and attribute set, filtered
 * to our own port netdevs.
 */
1300 static int port_switchdev_blocking_event(struct notifier_block *unused,
1301 					 unsigned long event, void *ptr)
1303 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1305 	if (!ethsw_port_dev_check(dev))
1309 	case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
1310 	case SWITCHDEV_PORT_OBJ_DEL:
1311 		return ethsw_switchdev_port_obj_event(event, dev, ptr);
1312 	case SWITCHDEV_PORT_ATTR_SET:
1313 		return ethsw_switchdev_port_attr_set_event(dev, ptr);
/* Register the three notifier blocks (netdevice, switchdev atomic,
 * switchdev blocking) with goto-based unwind so a failure leaves nothing
 * registered.
 */
1319 static int ethsw_register_notifier(struct device *dev)
1321 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1324 	ethsw->port_nb.notifier_call = port_netdevice_event;
1325 	err = register_netdevice_notifier(&ethsw->port_nb);
1327 		dev_err(dev, "Failed to register netdev notifier\n");
1331 	ethsw->port_switchdev_nb.notifier_call = port_switchdev_event;
1332 	err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
1334 		dev_err(dev, "Failed to register switchdev notifier\n");
1335 		goto err_switchdev_nb;
1338 	ethsw->port_switchdevb_nb.notifier_call = port_switchdev_blocking_event;
1339 	err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
1341 		dev_err(dev, "Failed to register switchdev blocking notifier\n");
1342 		goto err_switchdev_blocking_nb;
1347 err_switchdev_blocking_nb:
1348 	unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
1350 	unregister_netdevice_notifier(&ethsw->port_nb);
/* One-time DPSW object setup: open the object, fetch attributes, enforce
 * the minimum firmware API version, reset, enable HW FDB learning, set
 * every interface to STP FORWARDING on the default VLAN with broadcast
 * enabled, create the ordered workqueue and register the notifiers.
 * Error paths destroy the workqueue and close the object.
 */
1354 static int ethsw_init(struct fsl_mc_device *sw_dev)
1356 	struct device *dev = &sw_dev->dev;
1357 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1358 	u16 version_major, version_minor, i;
1359 	struct dpsw_stp_cfg stp_cfg;
1362 	ethsw->dev_id = sw_dev->obj_desc.id;
1364 	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
1366 		dev_err(dev, "dpsw_open err %d\n", err);
1370 	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1373 		dev_err(dev, "dpsw_get_attributes err %d\n", err);
1377 	err = dpsw_get_api_version(ethsw->mc_io, 0,
1381 		dev_err(dev, "dpsw_get_api_version err %d\n", err);
1385 	/* Minimum supported DPSW version check */
1386 	if (version_major < DPSW_MIN_VER_MAJOR ||
1387 	    (version_major == DPSW_MIN_VER_MAJOR &&
1388 	     version_minor < DPSW_MIN_VER_MINOR)) {
1389 		dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
1392 			DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
1397 	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
1399 		dev_err(dev, "dpsw_reset err %d\n", err);
1403 	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
1404 					 DPSW_FDB_LEARNING_MODE_HW);
1406 		dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
1410 	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
1411 	stp_cfg.state = DPSW_STP_STATE_FORWARDING;
1413 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1414 		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
1417 			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
1422 		err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
1423 					    ethsw->dpsw_handle, i, 1);
1426 				"dpsw_if_set_broadcast err %d for port %d\n",
1432 	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
1433 						   WQ_MEM_RECLAIM, "ethsw",
1435 	if (!ethsw->workqueue) {
1440 	err = ethsw_register_notifier(dev);
1442 		goto err_destroy_ordered_workqueue;
1446 err_destroy_ordered_workqueue:
1447 	destroy_workqueue(ethsw->workqueue);
1450 	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
/* Per-port init: undo the firmware default that puts every port in VLAN 1 —
 * remove the untagged registration, clear the PVID and drop membership — so
 * VLAN configuration can be driven by bridge join later.
 */
1454 static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
1456 	struct net_device *netdev = port_priv->netdev;
1457 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1458 	struct dpsw_vlan_if_cfg vcfg;
1461 	/* Switch starts with all ports configured to VLAN 1. Need to
1462 	 * remove this setting to allow configuration at bridge join
1465 	vcfg.if_id[0] = port_priv->idx;
1467 	err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
1468 					   DEFAULT_VLAN_ID, &vcfg);
1470 		netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
1475 	err = ethsw_port_set_pvid(port_priv, 0);
1479 	err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1480 				  DEFAULT_VLAN_ID, &vcfg);
1482 		netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
/* Unregister the three notifier blocks registered by
 * ethsw_register_notifier(), logging (but not propagating) any failure.
 */
1487 static void ethsw_unregister_notifier(struct device *dev)
1489 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1490 	struct notifier_block *nb;
1493 	nb = &ethsw->port_switchdevb_nb;
1494 	err = unregister_switchdev_blocking_notifier(nb);
1497 			"Failed to unregister switchdev blocking notifier (%d)\n",
1500 	err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
1503 			"Failed to unregister switchdev notifier (%d)\n", err);
1505 	err = unregister_netdevice_notifier(&ethsw->port_nb);
1508 			"Failed to unregister netdev notifier (%d)\n", err);
/* Counterpart of ethsw_init(): unregister notifiers and close the DPSW
 * object.
 */
1511 static void ethsw_takedown(struct fsl_mc_device *sw_dev)
1513 	struct device *dev = &sw_dev->dev;
1514 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1517 	ethsw_unregister_notifier(dev);
1519 	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
1521 		dev_warn(dev, "dpsw_close err %d\n", err);
/* Driver remove: tear down irqs and the workqueue, disable the switch,
 * unregister/free every port netdev, close the DPSW object, release the MC
 * portal and clear drvdata (the core struct free is outside this excerpt).
 */
1524 static int ethsw_remove(struct fsl_mc_device *sw_dev)
1526 	struct ethsw_port_priv *port_priv;
1527 	struct ethsw_core *ethsw;
1532 	ethsw = dev_get_drvdata(dev);
1534 	ethsw_teardown_irqs(sw_dev);
1536 	destroy_workqueue(ethsw->workqueue);
1538 	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1540 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1541 		port_priv = ethsw->ports[i];
1542 		unregister_netdev(port_priv->netdev);
1543 		free_netdev(port_priv->netdev);
1545 	kfree(ethsw->ports);
1547 	ethsw_takedown(sw_dev);
1548 	fsl_mc_portal_free(ethsw->mc_io);
1552 	dev_set_drvdata(dev, NULL);
/* Create and register one port netdev: allocate the etherdev, fill the
 * private state (idx, default FORWARDING STP state, flooding implicitly
 * on), wire up the ndo/ethtool ops and MTU limits, run ethsw_port_init()
 * and register the netdev.  On failure the netdev is freed.
 */
1557 static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
1559 	struct ethsw_port_priv *port_priv;
1560 	struct device *dev = ethsw->dev;
1561 	struct net_device *port_netdev;
1564 	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
1566 		dev_err(dev, "alloc_etherdev error\n");
1570 	port_priv = netdev_priv(port_netdev);
1571 	port_priv->netdev = port_netdev;
1572 	port_priv->ethsw_data = ethsw;
1574 	port_priv->idx = port_idx;
1575 	port_priv->stp_state = BR_STATE_FORWARDING;
1577 	/* Flooding is implicitly enabled */
1578 	port_priv->flood = true;
1580 	SET_NETDEV_DEV(port_netdev, dev);
1581 	port_netdev->netdev_ops = &ethsw_port_ops;
1582 	port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
1584 	/* Set MTU limits */
1585 	port_netdev->min_mtu = ETH_MIN_MTU;
1586 	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
1588 	err = ethsw_port_init(port_priv, port_idx);
1590 		goto err_port_probe;
1592 	err = register_netdev(port_netdev);
1594 		dev_err(dev, "register_netdev error %d\n", err);
1595 		goto err_port_probe;
1598 	ethsw->ports[port_idx] = port_priv;
1603 	free_netdev(port_netdev);
/* Driver probe: allocate the core struct, acquire an MC portal (deferring
 * probe when none is available), run ethsw_init(), seed the implicit
 * defaults (DEFAULT_VLAN_ID membership, learning on), allocate and probe
 * each port, enable the switch and set up irqs.  The goto-labelled unwind
 * undoes everything in reverse order.
 */
1608 static int ethsw_probe(struct fsl_mc_device *sw_dev)
1610 	struct device *dev = &sw_dev->dev;
1611 	struct ethsw_core *ethsw;
1614 	/* Allocate switch core*/
1615 	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
1621 	dev_set_drvdata(dev, ethsw);
1623 	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
1627 		err = -EPROBE_DEFER;
1629 		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
1630 		goto err_free_drvdata;
1633 	err = ethsw_init(sw_dev);
1635 		goto err_free_cmdport;
1637 	/* DEFAULT_VLAN_ID is implicitly configured on the switch */
1638 	ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
1640 	/* Learning is implicitly enabled */
1641 	ethsw->learning = true;
1643 	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
1645 	if (!(ethsw->ports)) {
1650 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1651 		err = ethsw_probe_port(ethsw, i);
1653 			goto err_free_ports;
1656 	err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1658 		dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
1659 		goto err_free_ports;
1663 	err = ethsw_setup_irqs(sw_dev);
1667 	dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
/* Error unwind: disable switch, free registered ports, takedown, portal. */
1671 	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1674 	/* Cleanup registered ports only */
1675 	for (i--; i >= 0; i--) {
1676 		unregister_netdev(ethsw->ports[i]->netdev);
1677 		free_netdev(ethsw->ports[i]->netdev);
1679 	kfree(ethsw->ports);
1682 	ethsw_takedown(sw_dev);
1685 	fsl_mc_portal_free(ethsw->mc_io);
1689 	dev_set_drvdata(dev, NULL);
/* fsl-mc bus binding: device-id match table, driver registration and
 * standard module metadata.
 */
1694 static const struct fsl_mc_device_id ethsw_match_id_table[] = {
1696 		.vendor = FSL_MC_VENDOR_FREESCALE,
1701 MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
1703 static struct fsl_mc_driver eth_sw_drv = {
1705 		.name = KBUILD_MODNAME,
1706 		.owner = THIS_MODULE,
1708 	.probe = ethsw_probe,
1709 	.remove = ethsw_remove,
1710 	.match_id_table = ethsw_match_id_table
1713 module_fsl_mc_driver(eth_sw_drv);
1715 MODULE_LICENSE("GPL v2");
1716 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");