1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
25 #include "sja1105_sgmii.h"
26 #include "sja1105_tas.h"
28 #define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
30 static const struct dsa_switch_ops sja1105_switch_ops;
/* Pulse the switch's hardware reset line: drive the reset GPIO active for
 * @pulse_len ms, release it, then wait @startup_delay ms for the chip to
 * finish its power-on sequence. Both delays use msleep, so this must be
 * called from sleepable context.
 * NOTE(review): the msleep(pulse_len) between the two GPIO writes is not
 * visible in this excerpt -- confirm against the full source.
 */
32 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
33 unsigned int startup_delay)
35 gpiod_set_value_cansleep(gpio, 1);
36 /* Wait for minimum reset pulse length */
38 gpiod_set_value_cansleep(gpio, 0);
39 /* Wait until chip is ready after reset */
40 msleep(startup_delay);
/* Set (@allow == true) or clear (@allow == false) bit @to in the
 * reach_port mask of L2 Forwarding entry @from, i.e. permit or forbid
 * frames ingressing on port @from to be forwarded to port @to.
 * NOTE(review): the if/else around the two statements is not visible in
 * this excerpt -- confirm against the full source.
 */
44 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
45 int from, int to, bool allow)
48 l2_fwd[from].reach_port |= BIT(to);
50 l2_fwd[from].reach_port &= ~BIT(to);
53 /* Structure used to temporarily transport device tree
54 * settings into sja1105_setup
56 struct sja1105_dt_port {
/* PHY interface mode parsed from the port's "phy-mode" DT property */
57 phy_interface_t phy_mode;
/* MAC or PHY role of the xMII pins, derived from phy-handle/fixed-link
 * and the sja1105,role-mac / sja1105,role-phy overrides
 */
58 sja1105_mii_role_t role;
/* Populate the static MAC Configuration Table with one identical default
 * entry per port, then statically enable learning and frame I/O only on
 * the upstream (CPU-facing) port, since the STP state machine never runs
 * for that port. Presumably returns 0 on success and -ENOMEM on
 * allocation failure -- the error path lines are not visible here.
 */
61 static int sja1105_init_mac_settings(struct sja1105_private *priv)
63 struct sja1105_mac_config_entry default_mac = {
64 /* Enable all 8 priority queues on egress.
65 * Every queue i holds top[i] - base[i] frames.
66 * Sum of top[i] - base[i] is 511 (max hardware limit).
68 .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
69 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
70 .enabled = {true, true, true, true, true, true, true, true},
71 /* Keep standard IFG of 12 bytes on egress. */
73 /* Always put the MAC speed in automatic mode, where it can be
74 * adjusted at runtime by PHYLINK.
76 .speed = SJA1105_SPEED_AUTO,
77 /* No static correction for 1-step 1588 events */
80 /* Disable aging for critical TTEthernet traffic */
82 /* Internal VLAN (pvid) to apply to untagged ingress */
87 /* Don't drop traffic with other EtherType than ETH_P_IP */
89 /* Don't drop double-tagged traffic */
91 /* Don't drop untagged traffic */
93 /* Don't retag 802.1p (VID 0) traffic with the pvid */
95 /* Disable learning and I/O on user ports by default -
102 struct sja1105_mac_config_entry *mac;
103 struct sja1105_table *table;
106 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
108 /* Discard previous MAC Configuration Table */
109 if (table->entry_count) {
110 kfree(table->entries);
111 table->entry_count = 0;
/* One unpacked entry per switch port */
114 table->entries = kcalloc(SJA1105_NUM_PORTS,
115 table->ops->unpacked_entry_size, GFP_KERNEL);
119 table->entry_count = SJA1105_NUM_PORTS;
121 mac = table->entries;
123 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
124 mac[i] = default_mac;
125 if (i == dsa_upstream_port(priv->ds, i)) {
126 /* STP doesn't get called for CPU port, so we need to
127 * set the I/O parameters statically.
129 mac[i].dyn_learn = true;
130 mac[i].ingress = true;
131 mac[i].egress = true;
138 static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
140 if (priv->info->part_no != SJA1105R_PART_NO &&
141 priv->info->part_no != SJA1105S_PART_NO)
144 if (port != SJA1105_SGMII_PORT)
147 if (dsa_is_unused_port(priv->ds, port))
/* Build the single-entry xMII Mode Parameters Table from the per-port
 * PHY modes and MAC/PHY roles collected from the device tree in @ports.
 * Unsupported PHY modes (or SGMII on a port/part that lacks it) are
 * rejected with an error -- the return statements are not visible in
 * this excerpt.
 */
153 static int sja1105_init_mii_settings(struct sja1105_private *priv,
154 struct sja1105_dt_port *ports)
156 struct device *dev = &priv->spidev->dev;
157 struct sja1105_xmii_params_entry *mii;
158 struct sja1105_table *table;
161 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
163 /* Discard previous xMII Mode Parameters Table */
164 if (table->entry_count) {
165 kfree(table->entries);
166 table->entry_count = 0;
169 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
170 table->ops->unpacked_entry_size, GFP_KERNEL);
174 /* Override table based on PHYLINK DT bindings */
175 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
177 mii = table->entries;
179 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
180 if (dsa_is_unused_port(priv->ds, i))
183 switch (ports[i].phy_mode) {
184 case PHY_INTERFACE_MODE_MII:
185 mii->xmii_mode[i] = XMII_MODE_MII;
187 case PHY_INTERFACE_MODE_RMII:
188 mii->xmii_mode[i] = XMII_MODE_RMII;
190 case PHY_INTERFACE_MODE_RGMII:
191 case PHY_INTERFACE_MODE_RGMII_ID:
192 case PHY_INTERFACE_MODE_RGMII_RXID:
193 case PHY_INTERFACE_MODE_RGMII_TXID:
/* All RGMII delay variants map to the same hardware mode;
 * the delays themselves are handled by the clocking setup.
 */
194 mii->xmii_mode[i] = XMII_MODE_RGMII;
196 case PHY_INTERFACE_MODE_SGMII:
197 if (!sja1105_supports_sgmii(priv, i))
199 mii->xmii_mode[i] = XMII_MODE_SGMII;
202 dev_err(dev, "Unsupported PHY mode %s!\n",
203 phy_modes(ports[i].phy_mode));
206 /* Even though the SerDes port is able to drive SGMII autoneg
207 * like a PHY would, from the perspective of the XMII tables,
208 * the SGMII port should always be put in MAC mode.
210 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
211 mii->phy_mac[i] = XMII_MAC;
213 mii->phy_mac[i] = ports[i].role;
/* Initialize the static L2 Address Lookup table. It normally starts
 * empty (FDB entries are added dynamically), except on switches that can
 * limit multicast flooding: those get one locked catch-all entry at the
 * last FDB index matching any L2 multicast DMAC (odd first octet), whose
 * destports mask defines the unknown-multicast flooding domain.
 */
218 static int sja1105_init_static_fdb(struct sja1105_private *priv)
220 struct sja1105_l2_lookup_entry *l2_lookup;
221 struct sja1105_table *table;
224 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
226 /* We only populate the FDB table through dynamic L2 Address Lookup
227 * entries, except for a special entry at the end which is a catch-all
228 * for unknown multicast and will be used to control flooding domain.
230 if (table->entry_count) {
231 kfree(table->entries);
232 table->entry_count = 0;
/* Nothing more to do on switches that cannot limit multicast flooding */
235 if (!priv->info->can_limit_mcast_flood)
238 table->entries = kcalloc(1, table->ops->unpacked_entry_size,
243 table->entry_count = 1;
244 l2_lookup = table->entries;
246 /* All L2 multicast addresses have an odd first octet */
247 l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
248 l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
249 l2_lookup[0].lockeds = true;
250 l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
252 /* Flood multicast to every port by default */
253 for (port = 0; port < priv->ds->num_ports; port++)
254 if (!dsa_is_unused_port(priv->ds, port))
255 l2_lookup[0].destports |= BIT(port);
/* Install the single-entry L2 Lookup Parameters table with the driver's
 * default address-learning policy: 300 s ageing, shared VLAN learning,
 * no SMAC learning for link-local filter hits, and an even split of the
 * FDB capacity across ports on P/Q/R/S.
 */
260 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
262 struct sja1105_table *table;
263 u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
264 struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
265 /* Learned FDB entries are forgotten after 300 seconds */
266 .maxage = SJA1105_AGEING_TIME_MS(300000),
267 /* All entries within a FDB bin are available for learning */
268 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
269 /* And the P/Q/R/S equivalent setting: */
271 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
272 max_fdb_entries, max_fdb_entries, },
273 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
275 /* This selects between Independent VLAN Learning (IVL) and
276 * Shared VLAN Learning (SVL)
278 .shared_learn = true,
279 /* Don't discard management traffic based on ENFPORT -
280 * we don't perform SMAC port enforcement anyway, so
281 * what we are setting here doesn't matter.
283 .no_enf_hostprt = false,
284 /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
285 * Maybe correlate with no_linklocal_learn from bridge driver?
287 .no_mgmt_learn = true,
290 /* Dynamically learned FDB entries can overwrite other (older)
291 * dynamic FDB entries
297 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
299 if (table->entry_count) {
300 kfree(table->entries);
301 table->entry_count = 0;
304 table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
305 table->ops->unpacked_entry_size, GFP_KERNEL);
309 table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
311 /* This table only has a single entry */
312 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
313 default_l2_lookup_params;
/* Seed the static VLAN Lookup table with a single entry for the initial
 * pvid: every used port is a member, broadcast within the VLAN is
 * unrestricted, and all ports transmit it untagged. Also records a
 * matching sja1105_bridge_vlan on the dsa_8021q_vlans list for each
 * non-CPU port so STP/PTP traffic can egress untagged.
 */
318 static int sja1105_init_static_vlan(struct sja1105_private *priv)
320 struct sja1105_table *table;
321 struct sja1105_vlan_lookup_entry pvid = {
329 struct dsa_switch *ds = priv->ds;
332 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
334 /* The static VLAN table will only contain the initial pvid of 1.
335 * All other VLANs are to be configured through dynamic entries,
336 * and kept in the static configuration table as backing memory.
338 if (table->entry_count) {
339 kfree(table->entries);
340 table->entry_count = 0;
343 table->entries = kzalloc(table->ops->unpacked_entry_size,
348 table->entry_count = 1;
350 /* VLAN 1: all DT-defined ports are members; no restrictions on
351 * forwarding; always transmit as untagged.
353 for (port = 0; port < ds->num_ports; port++) {
354 struct sja1105_bridge_vlan *v;
356 if (dsa_is_unused_port(ds, port))
359 pvid.vmemb_port |= BIT(port);
360 pvid.vlan_bc |= BIT(port);
361 pvid.tag_port &= ~BIT(port);
363 /* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
364 * transmitted as untagged.
366 v = kzalloc(sizeof(*v), GFP_KERNEL);
/* The CPU port keeps its dsa_8021q tagging; skip the list entry */
373 if (dsa_is_cpu_port(ds, port))
375 list_add(&v->list, &priv->dsa_8021q_vlans);
378 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
/* Build the L2 Forwarding table: the first SJA1105_NUM_PORTS entries set
 * up a hub-and-spoke forwarding topology (each user port may only talk
 * to the upstream/CPU port and vice versa, including broadcast and flood
 * domains), and the following SJA1105_NUM_TC entries establish an
 * identity VLAN PCP mapping from ingress to egress.
 */
382 static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
384 struct sja1105_l2_forwarding_entry *l2fwd;
385 struct sja1105_table *table;
388 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
390 if (table->entry_count) {
391 kfree(table->entries);
392 table->entry_count = 0;
395 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
396 table->ops->unpacked_entry_size, GFP_KERNEL);
400 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
402 l2fwd = table->entries;
404 /* First 5 entries define the forwarding rules */
405 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
406 unsigned int upstream = dsa_upstream_port(priv->ds, i);
/* Identity PCP mapping for frames ingressing this port */
408 for (j = 0; j < SJA1105_NUM_TC; j++)
409 l2fwd[i].vlan_pmap[j] = j;
/* Allow traffic in both directions between port i and upstream */
414 sja1105_port_allow_traffic(l2fwd, i, upstream, true);
415 sja1105_port_allow_traffic(l2fwd, upstream, i, true);
/* User ports broadcast/flood only towards the upstream port... */
417 l2fwd[i].bc_domain = BIT(upstream);
418 l2fwd[i].fl_domain = BIT(upstream);
/* ...while the upstream port broadcasts/floods to all user ports */
420 l2fwd[upstream].bc_domain |= BIT(i);
421 l2fwd[upstream].fl_domain |= BIT(i);
423 /* Next 8 entries define VLAN PCP mapping from ingress to egress.
424 * Create a one-to-one mapping.
426 for (i = 0; i < SJA1105_NUM_TC; i++)
427 for (j = 0; j < SJA1105_NUM_PORTS; j++)
428 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
/* Install the single-entry L2 Forwarding Parameters table, dedicating
 * the whole frame buffer memory to ingress partition 0 and leaving the
 * other seven partitions empty.
 */
433 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
435 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
436 /* Disallow dynamic reconfiguration of vlan_pmap */
438 /* Use a single memory partition for all ingress queues */
439 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
441 struct sja1105_table *table;
443 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
445 if (table->entry_count) {
446 kfree(table->entries);
447 table->entry_count = 0;
450 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
451 table->ops->unpacked_entry_size, GFP_KERNEL);
455 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
457 /* This table only has a single entry */
458 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
459 default_l2fwd_params;
/* Recompute the frame buffer split between best-effort L2 traffic and
 * time-triggered virtual-link traffic. Best-effort gets the full memory
 * (reduced when VLAN retagging's loopback port is active); if any VL
 * Forwarding entries exist, a fixed SJA1105_VL_FRAME_MEMORY chunk is
 * carved out of partition 0 for them.
 */
464 void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
466 struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
467 struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
468 struct sja1105_table *table;
471 /* VLAN retagging is implemented using a loopback port that consumes
472 * frame buffers. That leaves less for us.
474 if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
475 max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
477 max_mem = SJA1105_MAX_FRAME_MEMORY;
479 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
480 l2_fwd_params = table->entries;
481 l2_fwd_params->part_spc[0] = max_mem;
483 /* If we have any critical-traffic virtual links, we need to reserve
484 * some frame buffer memory for them. At the moment, hardcode the value
485 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
486 * remaining for best-effort traffic. TODO: figure out a more flexible
487 * way to perform the frame buffer partitioning.
489 if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
492 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
493 vl_fwd_params = table->entries;
495 l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
496 vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
/* Install the single-entry General Parameters table: link-local filter
 * addresses/masks, the host (CPU) port for trapped traffic, invalid
 * defaults for mirror and cascade ports, and a non-802.1Q TPID so that
 * VLAN filtering starts out effectively disabled.
 */
501 static int sja1105_init_general_params(struct sja1105_private *priv)
502 struct sja1105_general_params_entry default_general_params = {
503 /* Allow dynamic changing of the mirror port */
504 .switchid = priv->ds->index,
505 /* Priority queue for link-local management frames
506 * (both ingress to and egress from CPU - PTP, STP etc)
509 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
510 .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
511 .incl_srcpt1 = false,
513 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
514 .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
515 .incl_srcpt0 = false,
517 /* The destination for traffic matching mac_fltres1 and
518 * mac_fltres0 on all ports except host_port. Such traffic
519 * received on host_port itself would be dropped, except
520 * by installing a temporary 'management route'
522 .host_port = dsa_upstream_port(priv->ds, 0),
523 /* Default to an invalid value */
524 .mirr_port = SJA1105_NUM_PORTS,
525 /* Link-local traffic received on casc_port will be forwarded
526 * to host_port without embedding the source port and device ID
527 * info in the destination MAC address (presumably because it
528 * is a cascaded port and a downstream SJA switch already did
529 * that). Default to an invalid port (to disable the feature)
530 * and overwrite this if we find any DSA (cascaded) ports.
532 .casc_port = SJA1105_NUM_PORTS,
534 .vllupformat = SJA1105_VL_FORMAT_PSFP,
537 /* Only update correctionField for 1-step PTP (L2 transport) */
539 /* Forcefully disable VLAN filtering by telling
540 * the switch that VLAN has a different EtherType.
542 .tpid = ETH_P_SJA1105,
543 .tpid2 = ETH_P_SJA1105,
545 struct sja1105_table *table;
547 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
549 if (table->entry_count) {
550 kfree(table->entries);
551 table->entry_count = 0;
554 table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
555 table->ops->unpacked_entry_size, GFP_KERNEL);
559 table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
561 /* This table only has a single entry */
562 ((struct sja1105_general_params_entry *)table->entries)[0] =
563 default_general_params;
/* Install the single-entry AVB Parameters table: the source/destination
 * MAC addresses used for PTP meta frames, and the PTP_CLK pin left as an
 * input (cas_master = false) to avoid electrical conflicts on boards
 * that drive that pin externally.
 */
568 static int sja1105_init_avb_params(struct sja1105_private *priv)
570 struct sja1105_avb_params_entry *avb;
571 struct sja1105_table *table;
573 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
575 /* Discard previous AVB Parameters Table */
576 if (table->entry_count) {
577 kfree(table->entries);
578 table->entry_count = 0;
581 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
582 table->ops->unpacked_entry_size, GFP_KERNEL);
586 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
588 avb = table->entries;
590 /* Configure the MAC addresses for meta frames */
591 avb->destmeta = SJA1105_META_DMAC;
592 avb->srcmeta = SJA1105_META_SMAC;
593 /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
594 * default. This is because there might be boards with a hardware
595 * layout where enabling the pin as output might cause an electrical
596 * clash. On E/T the pin is always an output, which the board designers
597 * probably already knew, so even if there are going to be electrical
598 * issues, there's nothing we can do.
600 avb->cas_master = false;
605 /* The L2 policing table is 2-stage. The table is looked up for each frame
606 * according to the ingress port, whether it was broadcast or not, and the
607 * classified traffic class (given by VLAN PCP). This portion of the lookup is
608 * fixed, and gives access to the SHARINDX, an indirection register pointing
609 * within the policing table itself, which is used to resolve the policer that
610 * will be used for this frame.
613 * +------------+--------+ +---------------------------------+
614 * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
615 * +------------+--------+ +---------------------------------+
616 * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
617 * +------------+--------+ +---------------------------------+
618 * ... | Policer 2: Rate, Burst, MTU |
619 * +------------+--------+ +---------------------------------+
620 * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
621 * +------------+--------+ +---------------------------------+
622 * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
623 * +------------+--------+ +---------------------------------+
624 * ... | Policer 5: Rate, Burst, MTU |
625 * +------------+--------+ +---------------------------------+
626 * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
627 * +------------+--------+ +---------------------------------+
628 * ... | Policer 7: Rate, Burst, MTU |
629 * +------------+--------+ +---------------------------------+
630 * |Port 4 TC 7 |SHARINDX| ...
631 * +------------+--------+
632 * |Port 0 BCAST|SHARINDX| ...
633 * +------------+--------+
634 * |Port 1 BCAST|SHARINDX| ...
635 * +------------+--------+
637 * +------------+--------+ +---------------------------------+
638 * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
639 * +------------+--------+ +---------------------------------+
641 * In this driver, we shall use policers 0-4 as statically allocated port
642 * (matchall) policers. So we need to make the SHARINDX for all lookups
643 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
645 * The remaining policers (40) shall be dynamically allocated for flower
646 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
/* Convert a rate in Mbps to the policer's RATE field encoding
 * (presumably units of 15.625 kbps, i.e. Mbps * 64 -- confirm against
 * the hardware manual).
 */
648 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
/* Populate the L2 Policing table: point all per-TC and broadcast lookup
 * entries of each ingress port at one shared "matchall" policer per
 * port, then program those policers to be effectively transparent
 * (line rate, maximum burst, standard VLAN MTU; CPU port excepted).
 */
650 static int sja1105_init_l2_policing(struct sja1105_private *priv)
652 struct sja1105_l2_policing_entry *policing;
653 struct sja1105_table *table;
656 table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
658 /* Discard previous L2 Policing Table */
659 if (table->entry_count) {
660 kfree(table->entries);
661 table->entry_count = 0;
664 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
665 table->ops->unpacked_entry_size, GFP_KERNEL);
669 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
671 policing = table->entries;
673 /* Setup shared indices for the matchall policers */
674 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
675 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
677 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
678 policing[port * SJA1105_NUM_TC + tc].sharindx = port;
680 policing[bcast].sharindx = port;
683 /* Setup the matchall policer parameters */
684 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
685 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
/* The CPU port presumably gets a larger MTU for tagging overhead;
 * the adjustment line is not visible in this excerpt.
 */
687 if (dsa_is_cpu_port(priv->ds, port))
690 policing[port].smax = 65535; /* Burst size in bytes */
691 policing[port].rate = SJA1105_RATE_MBPS(1000);
692 policing[port].maxlen = mtu;
693 policing[port].partition = 0;
/* Rebuild the entire static configuration from scratch (freeing any
 * previous one), running every per-table init helper in order, then
 * upload the result to the switch over SPI. Error-check lines between
 * the rc assignments are not visible in this excerpt.
 */
699 static int sja1105_static_config_load(struct sja1105_private *priv,
700 struct sja1105_dt_port *ports)
704 sja1105_static_config_free(&priv->static_config);
705 rc = sja1105_static_config_init(&priv->static_config,
706 priv->info->static_ops,
707 priv->info->device_id);
711 /* Build static configuration */
712 rc = sja1105_init_mac_settings(priv);
715 rc = sja1105_init_mii_settings(priv, ports);
718 rc = sja1105_init_static_fdb(priv);
721 rc = sja1105_init_static_vlan(priv);
724 rc = sja1105_init_l2_lookup_params(priv);
727 rc = sja1105_init_l2_forwarding(priv);
730 rc = sja1105_init_l2_forwarding_params(priv);
733 rc = sja1105_init_l2_policing(priv);
736 rc = sja1105_init_general_params(priv);
739 rc = sja1105_init_avb_params(priv);
743 /* Send initial configuration to hardware via SPI */
744 return sja1105_static_config_upload(priv);
/* Derive per-port RGMII RX/TX internal delay requests from the parsed
 * DT phy-modes. Ports in the MAC role are skipped (the PHY applies the
 * delays there). Requesting a delay on hardware without a
 * setup_rgmii_delay hook is an error -- the return lines are not
 * visible in this excerpt.
 */
747 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
748 const struct sja1105_dt_port *ports)
752 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
753 if (ports[i].role == XMII_MAC)
756 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
757 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
758 priv->rgmii_rx_delay[i] = true;
760 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
761 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
762 priv->rgmii_tx_delay[i] = true;
764 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
765 !priv->info->setup_rgmii_delay)
/* Walk the children of the DT "ports" node and fill in @ports: the port
 * index comes from "reg", the PHY mode from phy-mode/phy-interface-type,
 * and the MAC/PHY role is inferred from phy-handle vs fixed-link, with
 * optional sja1105,role-mac / sja1105,role-phy overrides. Error-return
 * and of_node_put lines on failure paths are not visible in this
 * excerpt.
 */
771 static int sja1105_parse_ports_node(struct sja1105_private *priv,
772 struct sja1105_dt_port *ports,
773 struct device_node *ports_node)
775 struct device *dev = &priv->spidev->dev;
776 struct device_node *child;
778 for_each_available_child_of_node(ports_node, child) {
779 struct device_node *phy_node;
780 phy_interface_t phy_mode;
784 /* Get switch port number from DT */
785 if (of_property_read_u32(child, "reg", &index) < 0) {
786 dev_err(dev, "Port number not defined in device tree "
787 "(property \"reg\")\n");
792 /* Get PHY mode from DT */
793 err = of_get_phy_mode(child, &phy_mode);
795 dev_err(dev, "Failed to read phy-mode or "
796 "phy-interface-type property for port %d\n",
801 ports[index].phy_mode = phy_mode;
803 phy_node = of_parse_phandle(child, "phy-handle", 0);
/* No phy-handle: the port must be a fixed link, otherwise it is
 * a binding error.
 */
805 if (!of_phy_is_fixed_link(child)) {
806 dev_err(dev, "phy-handle or fixed-link "
807 "properties missing!\n");
811 /* phy-handle is missing, but fixed-link isn't.
812 * So it's a fixed link. Default to PHY role.
814 ports[index].role = XMII_PHY;
816 /* phy-handle present => put port in MAC role */
817 ports[index].role = XMII_MAC;
818 of_node_put(phy_node);
821 /* The MAC/PHY role can be overridden with explicit bindings */
822 if (of_property_read_bool(child, "sja1105,role-mac"))
823 ports[index].role = XMII_MAC;
824 else if (of_property_read_bool(child, "sja1105,role-phy"))
825 ports[index].role = XMII_PHY;
/* Locate the mandatory "ports" child of the switch's DT node and hand
 * it to sja1105_parse_ports_node, releasing the node reference
 * afterwards. Errors out if the node is absent.
 */
831 static int sja1105_parse_dt(struct sja1105_private *priv,
832 struct sja1105_dt_port *ports)
834 struct device *dev = &priv->spidev->dev;
835 struct device_node *switch_node = dev->of_node;
836 struct device_node *ports_node;
839 ports_node = of_get_child_by_name(switch_node, "ports");
841 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
845 rc = sja1105_parse_ports_node(priv, ports, ports_node);
846 of_node_put(ports_node);
/* Read one SGMII PCS register over SPI and return its value, or a
 * negative errno on transfer failure (the return lines are not visible
 * in this excerpt).
 */
851 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
853 const struct sja1105_regs *regs = priv->info->regs;
857 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
/* Write one SGMII PCS register over SPI; returns 0 or a negative errno
 * (the return lines are not visible in this excerpt).
 */
865 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
868 const struct sja1105_regs *regs = priv->info->regs;
872 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
/* Program the SGMII PCS: vendor MMD/clock/FIFO settings, lane polarity,
 * SGMII autoneg mode, and -- when @an_enabled -- kick off in-band
 * autoneg via BMCR. If autoneg is not enabled here, the caller must
 * force the speed later via sja1105_sgmii_pcs_force_speed.
 */
880 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
881 bool an_enabled, bool an_master)
883 u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
885 /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
886 * stop the clock during LPI mode, make the MAC reconfigure
887 * autonomously after PCS autoneg is done, flush the internal FIFOs.
889 sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
890 SJA1105_DC1_CLOCK_STOP_EN |
891 SJA1105_DC1_MAC_AUTO_SW |
893 /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
894 sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
895 /* AUTONEG_CONTROL: Use SGMII autoneg */
/* PHY_MODE/SGMII_LINK presumably gated on @an_master -- the
 * conditional line is not visible in this excerpt.
 */
897 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
898 sja1105_sgmii_write(priv, SJA1105_AC, ac);
899 /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
900 * sja1105_sgmii_pcs_force_speed must be called later for the link
901 * to become operational.
904 sja1105_sgmii_write(priv, MII_BMCR,
905 BMCR_ANENABLE | BMCR_ANRESTART);
/* With in-band autoneg disabled, force the PCS link speed by writing
 * the matching BMCR speed bits plus full duplex. Invalid speeds are
 * logged and presumably cause an early return (line not visible in this
 * excerpt).
 */
908 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
915 pcs_speed = BMCR_SPEED1000;
918 pcs_speed = BMCR_SPEED100;
921 pcs_speed = BMCR_SPEED10;
924 dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
927 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
930 /* Convert link speed from SJA1105 to ethtool encoding */
/* Indexed by sja1105_speed_t; yields the SPEED_* ethtool constant */
931 static int sja1105_speed[] = {
932 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
933 [SJA1105_SPEED_10MBPS] = SPEED_10,
934 [SJA1105_SPEED_100MBPS] = SPEED_100,
935 [SJA1105_SPEED_1000MBPS] = SPEED_1000,
938 /* Set link speed in the MAC configuration for a specific port. */
/* Translate @speed_mbps (ethtool encoding, or SPEED_UNKNOWN) into the
 * switch's speed encoding, push it to the port's MAC config via dynamic
 * reconfiguration, and redo the port clocking when the port is RGMII
 * (other modes need no clock change). Returns 0 or a negative errno.
 */
939 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
942 struct sja1105_xmii_params_entry *mii;
943 struct sja1105_mac_config_entry *mac;
944 struct device *dev = priv->ds->dev;
945 sja1105_phy_interface_t phy_mode;
946 sja1105_speed_t speed;
949 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
950 * tables. On E/T, MAC reconfig tables are not readable, only writable.
951 * We have to *know* what the MAC looks like. For the sake of keeping
952 * the code common, we'll use the static configuration tables as a
953 * reasonable approximation for both E/T and P/Q/R/S.
955 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
956 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
958 switch (speed_mbps) {
960 /* PHYLINK called sja1105_mac_config() to inform us about
961 * the state->interface, but AN has not completed and the
962 * speed is not yet valid. UM10944.pdf says that setting
963 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
964 * ok for power consumption in case AN will never complete -
965 * otherwise PHYLINK should come back with a new update.
967 speed = SJA1105_SPEED_AUTO;
970 speed = SJA1105_SPEED_10MBPS;
973 speed = SJA1105_SPEED_100MBPS;
976 speed = SJA1105_SPEED_1000MBPS;
979 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
983 /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
984 * table, since this will be used for the clocking setup, and we no
985 * longer need to store it in the static config (already told hardware
986 * we want auto during upload phase).
987 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
988 * we need to configure the PCS only (if even that).
990 if (sja1105_supports_sgmii(priv, port))
991 mac[port].speed = SJA1105_SPEED_1000MBPS;
993 mac[port].speed = speed;
995 /* Write to the dynamic reconfiguration tables */
996 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
999 dev_err(dev, "Failed to write MAC config: %d\n", rc);
1003 /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
1004 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
1005 * RMII no change of the clock setup is required. Actually, changing
1006 * the clock setup does interrupt the clock signal for a certain time
1007 * which causes trouble for all PHYs relying on this signal.
1009 phy_mode = mii->xmii_mode[port];
1010 if (phy_mode != XMII_MODE_RGMII)
1013 return sja1105_clocking_setup_port(priv, port);
1016 /* The SJA1105 MAC programming model is through the static config (the xMII
1017 * Mode table cannot be dynamically reconfigured), and we have to program
1018 * that early (earlier than PHYLINK calls us, anyway).
1019 * So just error out in case the connected PHY attempts to change the initial
1020 * system interface MII protocol from what is defined in the DT, at least for
1023 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
1024 phy_interface_t interface)
1026 struct sja1105_xmii_params_entry *mii;
1027 sja1105_phy_interface_t phy_mode;
/* Compare PHYLINK's requested @interface against the xMII mode the
 * static config committed this port to; true means "mismatch".
 */
1029 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1030 phy_mode = mii->xmii_mode[port];
1032 switch (interface) {
1033 case PHY_INTERFACE_MODE_MII:
1034 return (phy_mode != XMII_MODE_MII);
1035 case PHY_INTERFACE_MODE_RMII:
1036 return (phy_mode != XMII_MODE_RMII);
1037 case PHY_INTERFACE_MODE_RGMII:
1038 case PHY_INTERFACE_MODE_RGMII_ID:
1039 case PHY_INTERFACE_MODE_RGMII_RXID:
1040 case PHY_INTERFACE_MODE_RGMII_TXID:
1041 return (phy_mode != XMII_MODE_RGMII);
1042 case PHY_INTERFACE_MODE_SGMII:
1043 return (phy_mode != XMII_MODE_SGMII);
/* PHYLINK .mac_config callback: reject interface-mode changes that
 * conflict with the static xMII configuration, reject in-band autoneg
 * on non-SGMII ports, and (re)program the SGMII PCS on the SGMII port.
 * The early returns after the dev_err calls are not visible in this
 * excerpt.
 */
1049 static void sja1105_mac_config(struct dsa_switch *ds, int port,
1051 const struct phylink_link_state *state)
1053 struct sja1105_private *priv = ds->priv;
1054 bool is_sgmii = sja1105_supports_sgmii(priv, port);
1056 if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1057 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
1058 phy_modes(state->interface));
1062 if (phylink_autoneg_inband(mode) && !is_sgmii) {
1063 dev_err(ds->dev, "In-band AN not supported!\n");
1068 sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
/* PHYLINK .mac_link_down callback: stop the port from transmitting by
 * setting its bit in the TX inhibit mask.
 */
1072 static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
1074 phy_interface_t interface)
1076 sja1105_inhibit_tx(ds->priv, BIT(port), true);
/* PHYLINK .mac_link_up callback: program the resolved @speed into the
 * port's MAC config, force the SGMII PCS speed when in-band autoneg is
 * not in use, then re-enable transmission on the port.
 */
1079 static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
1081 phy_interface_t interface,
1082 struct phy_device *phydev,
1083 int speed, int duplex,
1084 bool tx_pause, bool rx_pause)
1086 struct sja1105_private *priv = ds->priv;
1088 sja1105_adjust_port_config(priv, port, speed);
1090 if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
1091 sja1105_sgmii_pcs_force_speed(priv, speed);
1093 sja1105_inhibit_tx(priv, BIT(port), false);
/* PHYLINK .phylink_validate callback: reduce @supported and
 * @state->advertising to the link modes this port's MAC can actually
 * do -- full duplex only, no pause, gigabit only on RGMII/SGMII ports;
 * everything is cleared when the requested interface conflicts with the
 * static xMII configuration.
 */
1096 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
1097 unsigned long *supported,
1098 struct phylink_link_state *state)
1100 /* Construct a new mask which exhaustively contains all link features
1101 * supported by the MAC, and then apply that (logical AND) to what will
1102 * be sent to the PHY for "marketing".
1104 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1105 struct sja1105_private *priv = ds->priv;
1106 struct sja1105_xmii_params_entry *mii;
1108 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1110 /* include/linux/phylink.h says:
1111 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
1112 * expects the MAC driver to return all supported link modes.
1114 if (state->interface != PHY_INTERFACE_MODE_NA &&
1115 sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1116 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1120 /* The MAC does not support pause frames, and also doesn't
1121 * support half-duplex traffic modes.
1123 phylink_set(mask, Autoneg);
1124 phylink_set(mask, MII);
1125 phylink_set(mask, 10baseT_Full);
1126 phylink_set(mask, 100baseT_Full);
1127 phylink_set(mask, 100baseT1_Full);
1128 if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
1129 mii->xmii_mode[port] == XMII_MODE_SGMII)
1130 phylink_set(mask, 1000baseT_Full);
1132 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
1133 bitmap_and(state->advertising, state->advertising, mask,
1134 __ETHTOOL_LINK_MODE_MASK_NBITS);
/* PHYLINK PCS state callback: decode the SGMII AUTONEG_INTR_STATUS
 * register into @state (speed, duplex, autoneg completion, link).
 * Error handling for the register read and the invalid-speed case is
 * not fully visible in this excerpt.
 */
1137 static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
1138 struct phylink_link_state *state)
1140 struct sja1105_private *priv = ds->priv;
1143 /* Read the vendor-specific AUTONEG_INTR_STATUS register */
1144 ais = sja1105_sgmii_read(priv, SJA1105_AIS);
1148 switch (SJA1105_AIS_SPEED(ais)) {
1150 state->speed = SPEED_10;
1153 state->speed = SPEED_100;
1156 state->speed = SPEED_1000;
1159 dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
1160 SJA1105_AIS_SPEED(ais));
1162 state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
1163 state->an_complete = SJA1105_AIS_COMPLETE(ais);
1164 state->link = SJA1105_AIS_LINK_STATUS(ais);
/* Linear-search the static L2 Lookup table for an entry matching
 * @requested's MAC address and VLAN ID whose destports mask includes
 * @port. Presumably returns the matching index or a negative value when
 * not found -- the return lines are not visible in this excerpt.
 */
1170 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1171 const struct sja1105_l2_lookup_entry *requested)
1173 struct sja1105_l2_lookup_entry *l2_lookup;
1174 struct sja1105_table *table;
1177 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1178 l2_lookup = table->entries;
1180 for (i = 0; i < table->entry_count; i++)
1181 if (l2_lookup[i].macaddr == requested->macaddr &&
1182 l2_lookup[i].vlanid == requested->vlanid &&
1183 l2_lookup[i].destports & BIT(port))
1189 /* We want FDB entries added statically through the bridge command to persist
1190 * across switch resets, which are a common thing during normal SJA1105
1191 * operation. So we have to back them up in the static configuration tables
1192 * and hence apply them on next static config upload... yay!
/* Mirror a runtime FDB add/delete into the static L2 lookup table so
 * the entry survives the next static config upload (see the comment
 * above this function).
 */
1195 sja1105_static_fdb_change(struct sja1105_private *priv, int port,
1196 const struct sja1105_l2_lookup_entry *requested,
1199 struct sja1105_l2_lookup_entry *l2_lookup;
1200 struct sja1105_table *table;
1203 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1205 match = sja1105_find_static_fdb_entry(priv, port, requested);
1207 /* Can't delete a missing entry. */
1211 /* No match => new entry */
1212 rc = sja1105_table_resize(table, table->entry_count + 1);
/* The freshly grown slot at the end becomes the match */
1216 match = table->entry_count - 1;
1219 /* Assign pointer after the resize (it may be new memory) */
1220 l2_lookup = table->entries;
1223 * If the job was to add this FDB entry, it's already done (mostly
1224 * anyway, since the port forwarding mask may have changed, case in
1225 * which we update it).
1226 * Otherwise we have to delete it.
1229 l2_lookup[match] = *requested;
1233 /* To remove, the strategy is to overwrite the element with
1234 * the last one, and then reduce the array size by 1
1236 l2_lookup[match] = l2_lookup[table->entry_count - 1];
1237 return sja1105_table_resize(table, table->entry_count - 1);
1240 /* First-generation switches have a 4-way set associative TCAM that
1241 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
1242 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1243 * For the placement of a newly learnt FDB entry, the switch selects the bin
1244 * based on a hash function, and the way within that bin incrementally.
/* Flatten a (bin, way) pair of the 4-way set-associative TCAM into a
 * linear FDB index: bin * SJA1105ET_FDB_BIN_SIZE + way.
 */
1246 static int sja1105et_fdb_index(int bin, int way)
1248 return bin * SJA1105ET_FDB_BIN_SIZE + way;
/* Walk the ways of @bin over SPI, looking for an entry that matches
 * (@addr, @vid). A matching entry is copied into @match. Unused ways
 * encountered along the way are recorded through the last (out)
 * parameter so the caller can reuse a free slot.
 */
1251 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
1252 const u8 *addr, u16 vid,
1253 struct sja1105_l2_lookup_entry *match,
1258 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
1259 struct sja1105_l2_lookup_entry l2_lookup = {0};
1260 int index = sja1105et_fdb_index(bin, way);
1262 /* Skip unused entries, optionally marking them
1263 * into the return value
1265 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1266 index, &l2_lookup)) {
1272 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
1273 l2_lookup.vlanid == vid) {
1279 /* Return an invalid entry index if not found */
/* Add an FDB entry on first-generation (E/T) switches: hash (addr, vid)
 * to a bin, then either extend an existing entry's port mask, take a
 * free way, or evict a random victim when the bin is full. The result
 * is also mirrored into the static config via
 * sja1105_static_fdb_change() so it survives switch resets.
 */
1283 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1284 const unsigned char *addr, u16 vid)
1286 struct sja1105_l2_lookup_entry l2_lookup = {0};
1287 struct sja1105_private *priv = ds->priv;
1288 struct device *dev = ds->dev;
1289 int last_unused = -1;
1292 bin = sja1105et_fdb_hash(priv, addr, vid);
1294 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1295 &l2_lookup, &last_unused);
1297 /* We have an FDB entry. Is our port in the destination
1298 * mask? If yes, we need to do nothing. If not, we need
1299 * to rewrite the entry by adding this port to it.
1301 if (l2_lookup.destports & BIT(port))
1303 l2_lookup.destports |= BIT(port);
1305 int index = sja1105et_fdb_index(bin, way);
1307 /* We don't have an FDB entry. We construct a new one and
1308 * try to find a place for it within the FDB table.
1310 l2_lookup.macaddr = ether_addr_to_u64(addr);
1311 l2_lookup.destports = BIT(port);
1312 l2_lookup.vlanid = vid;
1314 if (last_unused >= 0) {
1317 /* Bin is full, need to evict somebody.
1318 * Choose victim at random. If you get these messages
1319 * often, you may need to consider changing the
1320 * distribution function:
1321 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1323 get_random_bytes(&way, sizeof(u8));
1324 way %= SJA1105ET_FDB_BIN_SIZE;
1325 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
/* Invalidate the victim before overwriting its slot */
1328 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1329 index, NULL, false);
1332 l2_lookup.index = sja1105et_fdb_index(bin, way);
1334 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1335 l2_lookup.index, &l2_lookup,
/* Back the entry up in the static config as well */
1340 return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
/* Delete an FDB entry on first-generation (E/T) switches: clear @port
 * from the entry's destination mask, keeping the entry only if other
 * ports still reference it. The static config backup is updated too.
 */
1343 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1344 const unsigned char *addr, u16 vid)
1346 struct sja1105_l2_lookup_entry l2_lookup = {0};
1347 struct sja1105_private *priv = ds->priv;
1348 int index, bin, way, rc;
1351 bin = sja1105et_fdb_hash(priv, addr, vid);
1352 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1356 index = sja1105et_fdb_index(bin, way);
1358 /* We have an FDB entry. Is our port in the destination mask? If yes,
1359 * we need to remove it. If the resulting port mask becomes empty, we
1360 * need to completely evict the FDB entry.
1361 * Otherwise we just write it back.
1363 l2_lookup.destports &= ~BIT(port);
1365 if (l2_lookup.destports)
1370 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1371 index, &l2_lookup, keep);
1375 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
/* Add an FDB entry on second-generation (P/Q/R/S) switches. The TCAM
 * supports associative search (SJA1105_SEARCH), so the entry is first
 * looked up by MAC + (optionally) VLAN; a miss falls back to probing
 * every index from 0 to 1023 for a free slot. The static config backup
 * is updated at the end as well.
 */
1378 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1379 const unsigned char *addr, u16 vid)
1381 struct sja1105_l2_lookup_entry l2_lookup = {0};
1382 struct sja1105_private *priv = ds->priv;
1385 /* Search for an existing entry in the FDB table */
1386 l2_lookup.macaddr = ether_addr_to_u64(addr);
1387 l2_lookup.vlanid = vid;
1388 l2_lookup.iotag = SJA1105_S_TAG;
1389 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
/* Only match on VLAN when the switch is VLAN-aware */
1390 if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1391 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1392 l2_lookup.mask_iotag = BIT(0);
1394 l2_lookup.mask_vlanid = 0;
1395 l2_lookup.mask_iotag = 0;
1397 l2_lookup.destports = BIT(port);
1399 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1400 SJA1105_SEARCH, &l2_lookup);
1402 /* Found and this port is already in the entry's
1403 * port mask => job done
1405 if (l2_lookup.destports & BIT(port))
1407 /* l2_lookup.index is populated by the switch in case it
1410 l2_lookup.destports |= BIT(port);
1411 goto skip_finding_an_index;
1414 /* Not found, so try to find an unused spot in the FDB.
1415 * This is slightly inefficient because the strategy is knock-knock at
1416 * every possible position from 0 to 1023.
1418 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1419 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1424 if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
1425 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
/* Static (locked) entries are not subject to ageing */
1428 l2_lookup.lockeds = true;
1429 l2_lookup.index = i;
1431 skip_finding_an_index:
1432 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1433 l2_lookup.index, &l2_lookup,
1438 return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
/* Delete an FDB entry on second-generation (P/Q/R/S) switches: find the
 * entry via associative TCAM search, drop @port from its destination
 * mask, and evict the entry entirely when no ports remain. The static
 * config backup is updated accordingly.
 */
1441 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1442 const unsigned char *addr, u16 vid)
1444 struct sja1105_l2_lookup_entry l2_lookup = {0};
1445 struct sja1105_private *priv = ds->priv;
1449 l2_lookup.macaddr = ether_addr_to_u64(addr);
1450 l2_lookup.vlanid = vid;
1451 l2_lookup.iotag = SJA1105_S_TAG;
1452 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
/* Only match on VLAN when the switch is VLAN-aware */
1453 if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1454 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1455 l2_lookup.mask_iotag = BIT(0);
1457 l2_lookup.mask_vlanid = 0;
1458 l2_lookup.mask_iotag = 0;
1460 l2_lookup.destports = BIT(port);
1462 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1463 SJA1105_SEARCH, &l2_lookup);
1467 l2_lookup.destports &= ~BIT(port);
1469 /* Decide whether we remove just this port from the FDB entry,
1470 * or if we remove it completely.
1472 if (l2_lookup.destports)
1477 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1478 l2_lookup.index, &l2_lookup, keep);
1482 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
/* Generation-independent .port_fdb_add entry point: normalizes the VID
 * when not in full VLAN filtering mode, then dispatches to the
 * chip-specific implementation (E/T or P/Q/R/S).
 */
1485 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1486 const unsigned char *addr, u16 vid)
1488 struct sja1105_private *priv = ds->priv;
1490 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1491 * so the switch still does some VLAN processing internally.
1492 * But Shared VLAN Learning (SVL) is also active, and it will take
1493 * care of autonomous forwarding between the unique pvid's of each
1494 * port. Here we just make sure that users can't add duplicate FDB
1495 * entries when in this mode - the actual VID doesn't matter except
1496 * for what gets printed in 'bridge fdb show'. In the case of zero,
1497 * no VID gets printed at all.
1499 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1502 return priv->info->fdb_add_cmd(ds, port, addr, vid);
/* Generation-independent .port_fdb_del entry point; VID normalization
 * mirrors sja1105_fdb_add().
 */
1505 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1506 const unsigned char *addr, u16 vid)
1508 struct sja1105_private *priv = ds->priv;
1510 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1513 return priv->info->fdb_del_cmd(ds, port, addr, vid);
/* .port_fdb_dump: walk all 1024 FDB indices over SPI and invoke @cb for
 * each valid entry reaching @port, hiding internal dsa_8021q VIDs and
 * the catch-all unknown-multicast entry from the user.
 */
1516 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1517 dsa_fdb_dump_cb_t *cb, void *data)
1519 struct sja1105_private *priv = ds->priv;
1520 struct device *dev = ds->dev;
1523 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1524 struct sja1105_l2_lookup_entry l2_lookup = {0};
1525 u8 macaddr[ETH_ALEN];
1528 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1530 /* No fdb entry at i, not an issue */
1534 dev_err(dev, "Failed to dump FDB: %d\n", rc);
1538 /* FDB dump callback is per port. This means we have to
1539 * disregard a valid entry if it's not for this port, even if
1540 * only to revisit it later. This is inefficient because the
1541 * 1024-sized FDB table needs to be traversed 4 times through
1542 * SPI during a 'bridge fdb show' command.
1544 if (!(l2_lookup.destports & BIT(port)))
1547 /* We need to hide the FDB entry for unknown multicast */
1548 if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
1549 l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
1552 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1554 /* We need to hide the dsa_8021q VLANs from the user. */
1555 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
1556 l2_lookup.vlanid = 0;
1557 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
/* Multicast database entries reuse the unicast FDB implementation. */
1562 static int sja1105_mdb_add(struct dsa_switch *ds, int port,
1563 const struct switchdev_obj_port_mdb *mdb)
1565 return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
/* Multicast deletion likewise delegates to the unicast FDB path. */
1568 static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1569 const struct switchdev_obj_port_mdb *mdb)
1571 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
/* Add (@member == true) or remove (@member == false) @port from the L2
 * forwarding domain shared with the other user ports of bridge @br,
 * committing the updated forwarding rows to hardware over SPI.
 */
1574 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1575 struct net_device *br, bool member)
1577 struct sja1105_l2_forwarding_entry *l2_fwd;
1578 struct sja1105_private *priv = ds->priv;
1581 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1583 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1584 /* Add this port to the forwarding matrix of the
1585 * other ports in the same bridge, and vice versa.
1587 if (!dsa_is_user_port(ds, i))
1589 /* For the ports already under the bridge, only one thing needs
1590 * to be done, and that is to add this port to their
1591 * reachability domain. So we can perform the SPI write for
1592 * them immediately. However, for this port itself (the one
1593 * that is new to the bridge), we need to add all other ports
1594 * to its reachability domain. So we do that incrementally in
1595 * this loop, and perform the SPI write only at the end, once
1596 * the domain contains all other bridge ports.
1600 if (dsa_to_port(ds, i)->bridge_dev != br)
1602 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1603 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1605 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1606 i, &l2_fwd[i], true);
/* Finally commit this port's own (fully accumulated) row */
1611 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1612 port, &l2_fwd[port], true);
/* Map a bridge STP state onto the port's MAC config flags: ingress,
 * egress and dynamic learning are enabled progressively as the port
 * moves towards FORWARDING. The result is written to hardware.
 */
1615 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1618 struct sja1105_private *priv = ds->priv;
1619 struct sja1105_mac_config_entry *mac;
1621 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1624 case BR_STATE_DISABLED:
1625 case BR_STATE_BLOCKING:
1626 /* From UM10944 description of DRPDTAG (why put this there?):
1627 * "Management traffic flows to the port regardless of the state
1628 * of the INGRESS flag". So BPDUs are still allowed to pass.
1629 * At the moment no difference between DISABLED and BLOCKING.
1631 mac[port].ingress = false;
1632 mac[port].egress = false;
1633 mac[port].dyn_learn = false;
1635 case BR_STATE_LISTENING:
1636 mac[port].ingress = true;
1637 mac[port].egress = false;
1638 mac[port].dyn_learn = false;
1640 case BR_STATE_LEARNING:
1641 mac[port].ingress = true;
1642 mac[port].egress = false;
/* Learning only if it is administratively enabled on this port */
1643 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1645 case BR_STATE_FORWARDING:
1646 mac[port].ingress = true;
1647 mac[port].egress = true;
1648 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1651 dev_err(ds->dev, "invalid STP state: %d\n", state);
1655 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
/* .port_bridge_join: add @port to @br's forwarding domain. */
1659 static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1660 struct net_device *br)
1662 return sja1105_bridge_member(ds, port, br, true);
/* .port_bridge_leave: remove @port from @br's forwarding domain. */
1665 static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1666 struct net_device *br)
1668 sja1105_bridge_member(ds, port, br, false);
1671 #define BYTES_PER_KBIT (1000LL / 8)
/* Find a free Credit-Based Shaper slot; a slot is unused when both its
 * idle and send slopes are zero.
 */
1673 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1677 for (i = 0; i < priv->info->num_cbs_shapers; i++)
1678 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
/* Release the CBS shaper bound to (@port, @prio): zero the cached entry
 * and write the cleared slot to hardware.
 */
1684 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1689 for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1690 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1692 if (cbs->port == port && cbs->prio == prio) {
1693 memset(cbs, 0, sizeof(*cbs));
1694 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
/* tc-cbs offload: program (or, when @offload->enable is false, delete)
 * a Credit-Based Shaper for the given port/queue, converting the
 * user-space kbit/s parameters into the hardware's byte/s encoding.
 */
1702 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1703 struct tc_cbs_qopt_offload *offload)
1705 struct sja1105_private *priv = ds->priv;
1706 struct sja1105_cbs_entry *cbs;
1709 if (!offload->enable)
1710 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1712 index = sja1105_find_unused_cbs_shaper(priv);
1716 cbs = &priv->cbs[index];
1718 cbs->prio = offload->queue;
1719 /* locredit and sendslope are negative by definition. In hardware,
1720 * positive values must be provided, and the negative sign is implicit.
1722 cbs->credit_hi = offload->hicredit;
1723 cbs->credit_lo = abs(offload->locredit);
1724 /* User space is in kbits/sec, hardware in bytes/sec */
1725 cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1726 cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1727 /* Convert the negative values from 64-bit 2's complement
1728 * to 32-bit 2's complement (for the case of 0x80000000 whose
1729 * negative is still negative).
1731 cbs->credit_lo &= GENMASK_ULL(31, 0);
1732 cbs->send_slope &= GENMASK_ULL(31, 0);
1734 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
/* Re-program every active CBS shaper from the driver's cache; used
 * after a switch reset wipes the hardware state.
 */
1738 static int sja1105_reload_cbs(struct sja1105_private *priv)
1742 for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1743 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
/* Skip unused slots (both slopes zero) */
1745 if (!cbs->idle_slope && !cbs->send_slope)
1748 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
/* Human-readable labels for the reset reasons logged by
 * sja1105_static_config_reload().
 */
1757 static const char * const sja1105_reset_reasons[] = {
1758 [SJA1105_VLAN_FILTERING] = "VLAN filtering",
1759 [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
1760 [SJA1105_AGEING_TIME] = "Ageing time",
1761 [SJA1105_SCHEDULING] = "Time-aware scheduling",
1762 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
1763 [SJA1105_VIRTUAL_LINKS] = "Virtual links",
1766 /* For situations where we need to change a setting at runtime that is only
1767 * available through the static configuration, resetting the switch in order
1768 * to upload the new static config is unavoidable. Back up the settings we
1769 * modify at runtime (currently only MAC) and restore them after uploading,
1770 * such that this operation is relatively seamless.
/* Reset the switch and re-upload the static configuration, then restore
 * everything the reset clobbers: per-port link speeds, SGMII PCS state,
 * the PTP clock (compensated for the reset downtime) and the CBS
 * shapers. Holds both the management and PTP locks for the duration.
 */
1772 int sja1105_static_config_reload(struct sja1105_private *priv,
1773 enum sja1105_reset_reason reason)
1775 struct ptp_system_timestamp ptp_sts_before;
1776 struct ptp_system_timestamp ptp_sts_after;
1777 struct sja1105_mac_config_entry *mac;
1778 int speed_mbps[SJA1105_NUM_PORTS];
1779 struct dsa_switch *ds = priv->ds;
1786 mutex_lock(&priv->mgmt_lock);
1788 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1790 /* Back up the dynamic link speed changed by sja1105_adjust_port_config
1791 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
1792 * switch wants to see in the static config in order to allow us to
1793 * change it through the dynamic interface later.
1795 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1796 speed_mbps[i] = sja1105_speed[mac[i].speed];
1797 mac[i].speed = SJA1105_SPEED_AUTO;
/* Save the PCS control register so SGMII state can be re-applied */
1800 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
1801 bmcr = sja1105_sgmii_read(priv, MII_BMCR);
1803 /* No PTP operations can run right now */
1804 mutex_lock(&priv->ptp_data.lock);
1806 rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
1808 goto out_unlock_ptp;
1810 /* Reset switch and send updated static configuration */
1811 rc = sja1105_static_config_upload(priv);
1813 goto out_unlock_ptp;
1815 rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
1817 goto out_unlock_ptp;
/* Estimate how long the reset took by bracketing each PTP clock
 * access with system timestamps, and advance the clock accordingly.
 */
1819 t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
1820 t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
1821 t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
1822 t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
1823 /* Mid point, corresponds to pre-reset PTPCLKVAL */
1824 t12 = t1 + (t2 - t1) / 2;
1825 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
1826 t34 = t3 + (t4 - t3) / 2;
1827 /* Advance PTPCLKVAL by the time it took since its readout */
1830 __sja1105_ptp_adjtime(ds, now);
1833 mutex_unlock(&priv->ptp_data.lock);
1835 dev_info(priv->ds->dev,
1836 "Reset switch and programmed static config. Reason: %s\n",
1837 sja1105_reset_reasons[reason]);
1839 /* Configure the CGU (PLLs) for MII and RMII PHYs.
1840 * For these interfaces there is no dynamic configuration
1841 * needed, since PLLs have same settings at all speeds.
1843 rc = sja1105_clocking_setup(priv);
/* Restore the per-port link speeds saved before the reset */
1847 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1848 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
/* Re-apply the SGMII PCS configuration from the saved BMCR */
1853 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
1854 bool an_enabled = !!(bmcr & BMCR_ANENABLE);
1856 sja1105_sgmii_pcs_config(priv, an_enabled, false);
1859 int speed = SPEED_UNKNOWN;
1861 if (bmcr & BMCR_SPEED1000)
1863 else if (bmcr & BMCR_SPEED100)
1865 else if (bmcr & BMCR_SPEED10)
1868 sja1105_sgmii_pcs_force_speed(priv, speed);
1872 rc = sja1105_reload_cbs(priv);
1876 mutex_unlock(&priv->mgmt_lock);
/* Set the port's default (PVID) VLAN in the MAC config table and commit
 * it to hardware.
 */
1881 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1883 struct sja1105_mac_config_entry *mac;
1885 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1887 mac[port].vlanid = pvid;
1889 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
/* Cross-chip bridging: when a port of another sja1105 switch joins the
 * same bridge, set up the symmetric dsa_8021q cross-chip links between
 * every local user port in @br and the remote port.
 */
1893 static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
1894 int tree_index, int sw_index,
1895 int other_port, struct net_device *br)
1897 struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1898 struct sja1105_private *other_priv = other_ds->priv;
1899 struct sja1105_private *priv = ds->priv;
/* Only cooperate with other sja1105 switches */
1902 if (other_ds->ops != &sja1105_switch_ops)
1905 for (port = 0; port < ds->num_ports; port++) {
1906 if (!dsa_is_user_port(ds, port))
1908 if (dsa_to_port(ds, port)->bridge_dev != br)
1911 rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
1913 other_priv->dsa_8021q_ctx,
1918 rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
1920 priv->dsa_8021q_ctx,
/* Undo sja1105_crosschip_bridge_join(): tear down the symmetric
 * dsa_8021q cross-chip links for every local user port in @br.
 */
1929 static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
1930 int tree_index, int sw_index,
1932 struct net_device *br)
1934 struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1935 struct sja1105_private *other_priv = other_ds->priv;
1936 struct sja1105_private *priv = ds->priv;
1939 if (other_ds->ops != &sja1105_switch_ops)
1942 for (port = 0; port < ds->num_ports; port++) {
1943 if (!dsa_is_user_port(ds, port))
1945 if (dsa_to_port(ds, port)->bridge_dev != br)
1948 dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
1949 other_priv->dsa_8021q_ctx,
1952 dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
1954 priv->dsa_8021q_ctx, port);
/* Enable or disable dsa_8021q-based switch tagging and log the change. */
1958 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1960 struct sja1105_private *priv = ds->priv;
1963 rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
1967 dev_info(ds->dev, "%s switch tagging\n",
1968 enabled ? "Enabled" : "Disabled");
/* Report the tagging protocol this driver uses towards the CPU port. */
1972 static enum dsa_tag_protocol
1973 sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
1974 enum dsa_tag_protocol mp)
1976 return DSA_TAG_PROTO_SJA1105;
/* Find an unused slot in @subvlan_map (unused slots hold VLAN_N_VID).
 * The scan starts at 1; handling of slot 0 and of @pvid is not visible
 * in this extract.
 */
1979 static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
1986 for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1987 if (subvlan_map[subvlan] == VLAN_N_VID)
/* Look up the subvlan index currently mapped to @vid, if any. */
1993 static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
1997 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1998 if (subvlan_map[subvlan] == vid)
/* Look up @vid in the subvlan map already committed for @port. */
2004 static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
2007 struct sja1105_port *sp = &priv->ports[port];
2009 return sja1105_find_subvlan(sp->subvlan_map, vid);
/* Mark all subvlan slots as unused (VLAN_N_VID is the sentinel). */
2012 static void sja1105_init_subvlan_map(u16 *subvlan_map)
2016 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2017 subvlan_map[subvlan] = VLAN_N_VID;
/* Copy a freshly built subvlan map into the port's committed state. */
2020 static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2023 struct sja1105_port *sp = &priv->ports[port];
2026 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2027 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
/* Return the index of @vid in the static VLAN lookup table, or an
 * invalid index if the VLAN is not configured.
 */
2030 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2032 struct sja1105_vlan_lookup_entry *vlan;
2035 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2036 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2038 for (i = 0; i < count; i++)
2039 if (vlan[i].vlanid == vid)
2042 /* Return an invalid entry index if not found */
/* Find an existing retagging rule that translates @from_vid ingressing
 * on @from_port into the destination VID; used to build composite rules
 * instead of wasting retagging table slots.
 */
2047 sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2048 int count, int from_port, u16 from_vid,
2053 for (i = 0; i < count; i++)
2054 if (retagging[i].ing_port == BIT(from_port) &&
2055 retagging[i].vlan_ing == from_vid &&
2056 retagging[i].vlan_egr == to_vid)
2059 /* Return an invalid entry index if not found */
/* Reconcile the desired VLAN and retagging state (@new_vlan indexed by
 * VID, @new_retagging) against the current static config: delete, add
 * or update entries over SPI, then rebuild the static tables so they
 * match what was just committed to hardware.
 */
2063 static int sja1105_commit_vlans(struct sja1105_private *priv,
2064 struct sja1105_vlan_lookup_entry *new_vlan,
2065 struct sja1105_retagging_entry *new_retagging,
2068 struct sja1105_retagging_entry *retagging;
2069 struct sja1105_vlan_lookup_entry *vlan;
2070 struct sja1105_table *table;
2075 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2076 vlan = table->entries;
/* First pass: diff each possible VID against the existing table */
2078 for (i = 0; i < VLAN_N_VID; i++) {
2079 int match = sja1105_is_vlan_configured(priv, i);
/* VLAN_N_VID in new_vlan[i].vlanid marks "not requested" */
2081 if (new_vlan[i].vlanid != VLAN_N_VID)
2084 if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2085 /* Was there before, no longer is. Delete */
2086 dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2087 rc = sja1105_dynamic_config_write(priv,
2088 BLK_IDX_VLAN_LOOKUP,
2089 i, &vlan[match], false);
2092 } else if (new_vlan[i].vlanid != VLAN_N_VID) {
2093 /* Nothing changed, don't do anything */
2095 vlan[match].vlanid == new_vlan[i].vlanid &&
2096 vlan[match].tag_port == new_vlan[i].tag_port &&
2097 vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2098 vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2101 dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2102 rc = sja1105_dynamic_config_write(priv,
2103 BLK_IDX_VLAN_LOOKUP,
/* Rebuild the static VLAN table to mirror the committed state */
2111 if (table->entry_count)
2112 kfree(table->entries);
2114 table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2116 if (!table->entries)
2119 table->entry_count = num_vlans;
2120 vlan = table->entries;
2122 for (i = 0; i < VLAN_N_VID; i++) {
2123 if (new_vlan[i].vlanid == VLAN_N_VID)
2125 vlan[k++] = new_vlan[i];
2128 /* VLAN Retagging Table */
2129 table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2130 retagging = table->entries;
/* Invalidate all old retagging rules in hardware */
2132 for (i = 0; i < table->entry_count; i++) {
2133 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2134 i, &retagging[i], false);
2139 if (table->entry_count)
2140 kfree(table->entries);
2142 table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2144 if (!table->entries)
2147 table->entry_count = num_retagging;
2148 retagging = table->entries;
/* Program the new retagging rules */
2150 for (i = 0; i < num_retagging; i++) {
2151 retagging[i] = new_retagging[i];
2154 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2155 i, &retagging[i], true);
/* Bookkeeping node for a bridge VLAN discovered on a neighbour switch
 * that needs reverse retagging locally. Several members (vid, untagged,
 * port, other_port) are elided in this extract; their use is visible in
 * sja1105_build_crosschip_subvlans().
 */
2163 struct sja1105_crosschip_vlan {
2164 struct list_head list;
2169 struct dsa_8021q_context *other_ctx;
/* List node identifying a neighbour switch by its dsa_8021q context. */
2172 struct sja1105_crosschip_switch {
2173 struct list_head list;
2174 struct dsa_8021q_context *other_ctx;
/* Re-apply port PVIDs from the list matching the current VLAN awareness
 * state: bridge VLANs under full filtering, dsa_8021q VLANs otherwise.
 */
2177 static int sja1105_commit_pvid(struct sja1105_private *priv)
2179 struct sja1105_bridge_vlan *v;
2180 struct list_head *vlan_list;
2183 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2184 vlan_list = &priv->bridge_vlans;
2186 vlan_list = &priv->dsa_8021q_vlans;
2188 list_for_each_entry(v, vlan_list, list) {
2190 rc = sja1105_pvid_apply(priv, v->port, v->vid);
/* Fold the user-configured bridge VLANs into @new_vlan (indexed by
 * VID), accumulating membership, broadcast-domain and tagging masks per
 * port. Skipped entirely when the switch is VLAN-unaware.
 */
2200 sja1105_build_bridge_vlans(struct sja1105_private *priv,
2201 struct sja1105_vlan_lookup_entry *new_vlan)
2203 struct sja1105_bridge_vlan *v;
2205 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2208 list_for_each_entry(v, &priv->bridge_vlans, list) {
2211 new_vlan[match].vlanid = v->vid;
2212 new_vlan[match].vmemb_port |= BIT(v->port);
2213 new_vlan[match].vlan_bc |= BIT(v->port);
2215 new_vlan[match].tag_port |= BIT(v->port);
/* Fold the driver-internal dsa_8021q VLANs into @new_vlan; these are
 * not used when full VLAN filtering is active.
 */
2222 sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2223 struct sja1105_vlan_lookup_entry *new_vlan)
2225 struct sja1105_bridge_vlan *v;
2227 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2230 list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2233 new_vlan[match].vlanid = v->vid;
2234 new_vlan[match].vmemb_port |= BIT(v->port);
2235 new_vlan[match].vlan_bc |= BIT(v->port);
2237 new_vlan[match].tag_port |= BIT(v->port);
/* In best-effort VLAN mode, build the per-port sub-VLAN mappings:
 * allocate a subvlan slot per bridge VLAN on each user port, install
 * the corresponding dsa_8021q rx_vid and a retagging rule towards the
 * upstream (CPU) port. Updates @subvlan_map, @new_vlan, @new_retagging
 * and *@num_retagging.
 */
2243 static int sja1105_build_subvlans(struct sja1105_private *priv,
2244 u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2245 struct sja1105_vlan_lookup_entry *new_vlan,
2246 struct sja1105_retagging_entry *new_retagging,
2249 struct sja1105_bridge_vlan *v;
2250 int k = *num_retagging;
2252 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2255 list_for_each_entry(v, &priv->bridge_vlans, list) {
2256 int upstream = dsa_upstream_port(priv->ds, v->port);
2260 /* Only sub-VLANs on user ports need to be applied.
2261 * Bridge VLANs also include VLANs added automatically
2262 * by DSA on the CPU port.
2264 if (!dsa_is_user_port(priv->ds, v->port))
/* Reuse an existing subvlan for this VID, or claim a free one */
2267 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2270 subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2273 dev_err(priv->ds->dev, "No more free subvlans\n");
2278 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2280 /* @v->vid on @v->port needs to be retagged to @rx_vid
2281 * on @upstream. Assume @v->vid on @v->port and on
2282 * @upstream was already configured by the previous
2283 * iteration over bridge_vlans.
2286 new_vlan[match].vlanid = rx_vid;
2287 new_vlan[match].vmemb_port |= BIT(v->port);
2288 new_vlan[match].vmemb_port |= BIT(upstream);
2289 new_vlan[match].vlan_bc |= BIT(v->port);
2290 new_vlan[match].vlan_bc |= BIT(upstream);
2291 /* The "untagged" flag is set the same as for the
2295 new_vlan[match].tag_port |= BIT(v->port);
2296 /* But it's always tagged towards the CPU */
2297 new_vlan[match].tag_port |= BIT(upstream);
2299 /* The Retagging Table generates packet *clones* with
2300 * the new VLAN. This is a very odd hardware quirk
2301 * which we need to suppress by dropping the original
2303 * Deny egress of the original VLAN towards the CPU
2304 * port. This will force the switch to drop it, and
2305 * we'll see only the retagged packets.
2308 new_vlan[match].vlan_bc &= ~BIT(upstream);
2310 /* And the retagging itself */
2311 new_retagging[k].vlan_ing = v->vid;
2312 new_retagging[k].vlan_egr = rx_vid;
2313 new_retagging[k].ing_port = BIT(v->port);
2314 new_retagging[k].egr_port = BIT(upstream);
2315 if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
2316 dev_err(priv->ds->dev, "No more retagging rules\n");
2320 subvlan_map[v->port][subvlan] = v->vid;
2328 /* Sadly, in crosschip scenarios where the CPU port is also the link to another
2329 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
2330 * the CPU port of neighbour switches.
/* In best-effort VLAN mode, build the reverse-retagging rules for
 * cross-chip scenarios: collect the bridge VLANs that neighbour
 * switches retag into dsa_8021q rx_vids, then install, on this switch,
 * VLAN entries for those rx_vids plus retagging rules that restore the
 * original VID on the way back (see the comment above this function).
 */
2333 sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
2334 struct sja1105_vlan_lookup_entry *new_vlan,
2335 struct sja1105_retagging_entry *new_retagging,
2338 struct sja1105_crosschip_vlan *tmp, *pos;
2339 struct dsa_8021q_crosschip_link *c;
2340 struct sja1105_bridge_vlan *v, *w;
2341 struct list_head crosschip_vlans;
2342 int k = *num_retagging;
2345 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2348 INIT_LIST_HEAD(&crosschip_vlans);
/* Phase 1: gather the relevant VLANs from all cross-chip links */
2350 list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2351 struct sja1105_private *other_priv = c->other_ctx->ds->priv;
2353 if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2356 /* Crosschip links are also added to the CPU ports.
2359 if (!dsa_is_user_port(priv->ds, c->port))
2361 if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
2364 /* Search for VLANs on the remote port */
2365 list_for_each_entry(v, &other_priv->bridge_vlans, list) {
2366 bool already_added = false;
2367 bool we_have_it = false;
2369 if (v->port != c->other_port)
2372 /* If @v is a pvid on @other_ds, it does not need
2373 * re-retagging, because its SVL field is 0 and we
2374 * already allow that, via the dsa_8021q crosschip
2380 /* Search for the VLAN on our local port */
2381 list_for_each_entry(w, &priv->bridge_vlans, list) {
2382 if (w->port == c->port && w->vid == v->vid) {
/* Deduplicate: skip if an identical tuple is already queued */
2391 list_for_each_entry(tmp, &crosschip_vlans, list) {
2392 if (tmp->vid == v->vid &&
2393 tmp->untagged == v->untagged &&
2394 tmp->port == c->port &&
2395 tmp->other_port == v->port &&
2396 tmp->other_ctx == c->other_ctx) {
2397 already_added = true;
2405 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2407 dev_err(priv->ds->dev, "Failed to allocate memory\n");
2412 tmp->port = c->port;
2413 tmp->other_port = v->port;
2414 tmp->other_ctx = c->other_ctx;
2415 tmp->untagged = v->untagged;
2416 list_add(&tmp->list, &crosschip_vlans);
/* Phase 2: emit VLAN entries and reverse retagging rules */
2420 list_for_each_entry(tmp, &crosschip_vlans, list) {
2421 struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
2422 int upstream = dsa_upstream_port(priv->ds, tmp->port);
2426 subvlan = sja1105_find_committed_subvlan(other_priv,
2429 /* If this happens, it's a bug. The neighbour switch does not
2430 * have a subvlan for tmp->vid on tmp->other_port, but it
2431 * should, since we already checked for its vlan_state.
2433 if (WARN_ON(subvlan < 0)) {
2438 rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
2442 /* The @rx_vid retagged from @tmp->vid on
2443 * {@tmp->other_ds, @tmp->other_port} needs to be
2444 * re-retagged to @tmp->vid on the way back to us.
2446 * Assume the original @tmp->vid is already configured
2447 * on this local switch, otherwise we wouldn't be
2448 * retagging its subvlan on the other switch in the
2449 * first place. We just need to add a reverse retagging
2450 * rule for @rx_vid and install @rx_vid on our ports.
2453 new_vlan[match].vlanid = rx_vid;
2454 new_vlan[match].vmemb_port |= BIT(tmp->port);
2455 new_vlan[match].vmemb_port |= BIT(upstream);
2456 /* The "untagged" flag is set the same as for the
2457 * original VLAN. And towards the CPU, it doesn't
2458 * really matter, because @rx_vid will only receive
2459 * traffic on that port. For consistency with other dsa_8021q
2460 * VLANs, we'll keep the CPU port tagged.
2463 new_vlan[match].tag_port |= BIT(tmp->port);
2464 new_vlan[match].tag_port |= BIT(upstream);
2465 /* Deny egress of @rx_vid towards our front-panel port.
2466 * This will force the switch to drop it, and we'll see
2467 * only the re-retagged packets (having the original,
2468 * pre-initial-retagging, VLAN @tmp->vid).
2470 new_vlan[match].vlan_bc &= ~BIT(tmp->port);
2472 /* On reverse retagging, the same ingress VLAN goes to multiple
2473 * ports. So we have an opportunity to create composite rules
2474 * to not waste the limited space in the retagging table.
2476 k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
2477 upstream, rx_vid, tmp->vid);
2479 if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
2480 dev_err(priv->ds->dev, "No more retagging rules\n");
2484 k = (*num_retagging)++;
2486 /* And the retagging itself */
2487 new_retagging[k].vlan_ing = rx_vid;
2488 new_retagging[k].vlan_egr = tmp->vid;
2489 new_retagging[k].ing_port = BIT(upstream);
2490 new_retagging[k].egr_port |= BIT(tmp->port);
/* Free the temporary bookkeeping list */
2494 list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
2495 list_del(&tmp->list);
/* Forward declaration: sja1105_build_vlan_table() and
 * sja1105_notify_crosschip_switches() call each other (the @notify flag
 * breaks the recursion).
 */
2502 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
/* Collect the set of distinct switches we share a dsa_8021q cross-chip link
 * with (deduplicated on a temporary local list), then ask each of them to
 * rebuild its VLAN table with notify=false so they do not notify us back.
 * NOTE(review): this excerpt is elided — allocation-failure/early-exit paths
 * and the final cleanup/free are only partially visible.
 */
2504 static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2506 struct sja1105_crosschip_switch *s, *pos;
2507 struct list_head crosschip_switches;
2508 struct dsa_8021q_crosschip_link *c;
2511 INIT_LIST_HEAD(&crosschip_switches);
/* Deduplicate: one entry per distinct other_ctx, regardless of how many
 * crosschip links point to it.
 */
2513 list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2514 bool already_added = false;
2516 list_for_each_entry(s, &crosschip_switches, list) {
2517 if (s->other_ctx == c->other_ctx) {
2518 already_added = true;
2526 s = kzalloc(sizeof(*s), GFP_KERNEL);
2528 dev_err(priv->ds->dev, "Failed to allocate memory\n");
2532 s->other_ctx = c->other_ctx;
2533 list_add(&s->list, &crosschip_switches);
/* Notify each neighbour exactly once; notify=false avoids mutual
 * re-notification recursion.
 */
2536 list_for_each_entry(s, &crosschip_switches, list) {
2537 struct sja1105_private *other_priv = s->other_ctx->ds->priv;
2539 rc = sja1105_build_vlan_table(other_priv, false);
/* Tear down the temporary list (safe iteration because entries are freed). */
2545 list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
/* Rebuild the complete VLAN lookup and retagging tables from all VLAN
 * sources (bridge VLANs, dsa_8021q VLANs, subvlans, cross-chip subvlans),
 * commit them to hardware, then optionally notify cross-chip neighbours.
 * NOTE(review): excerpt is elided — error-handling gotos and some frees are
 * not visible here.
 */
2553 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2555 u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2556 struct sja1105_retagging_entry *new_retagging;
2557 struct sja1105_vlan_lookup_entry *new_vlan;
2558 struct sja1105_table *table;
2559 int i, num_retagging = 0;
/* Scratch copy of the full VLAN lookup table, one slot per possible VID. */
2562 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2563 new_vlan = kcalloc(VLAN_N_VID,
2564 table->ops->unpacked_entry_size, GFP_KERNEL);
/* NOTE(review): BLK_IDX_VLAN_LOOKUP is reused here to size the *retagging*
 * allocation — verify this shouldn't be BLK_IDX_RETAGGING (entry sizes may
 * differ); cannot confirm from this elided excerpt.
 */
2568 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2569 new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2570 table->ops->unpacked_entry_size, GFP_KERNEL);
2571 if (!new_retagging) {
/* VLAN_N_VID in vlanid/vlan_ing marks an unused slot (it is out of range). */
2576 for (i = 0; i < VLAN_N_VID; i++)
2577 new_vlan[i].vlanid = VLAN_N_VID;
2579 for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2580 new_retagging[i].vlan_ing = VLAN_N_VID;
2582 for (i = 0; i < priv->ds->num_ports; i++)
2583 sja1105_init_subvlan_map(subvlan_map[i]);
/* Source 1: VLANs added by the bridge layer. */
2586 rc = sja1105_build_bridge_vlans(priv, new_vlan);
2590 /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2595 rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2599 /* Private VLANs necessary for dsa_8021q operation, which we need to
2600 * determine on our own:
2602 * - Sub-VLANs of crosschip switches
2604 rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2609 rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
/* Push the rebuilt tables to the switch, then the per-port pvids. */
2614 rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2618 rc = sja1105_commit_pvid(priv);
2622 for (i = 0; i < priv->ds->num_ports; i++)
2623 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
/* Only when called from a top-level context (notify==true). */
2626 rc = sja1105_notify_crosschip_switches(priv);
2633 kfree(new_retagging);
2638 /* The TPID setting belongs to the General Parameters table,
2639 * which can only be partially reconfigured at runtime (and not the TPID).
2640 * So a switch reset is required.
/* DSA .port_vlan_filtering hook. Selects the TPIDs the switch uses to
 * recognize VLAN tags (real 802.1Q/802.1AD when filtering, the private
 * ETH_P_SJA1105 EtherType when not), picks the driver's vlan_state, flips
 * shared vs. independent VLAN learning, and triggers a full static-config
 * reload (the TPID lives in the General Parameters table, which cannot be
 * changed at runtime — see comment above this function).
 * NOTE(review): excerpt is elided — several if/else bodies, returns and
 * rc checks are missing from view.
 */
2642 int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
2644 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
2645 struct sja1105_general_params_entry *general_params;
2646 struct sja1105_private *priv = ds->priv;
2647 enum sja1105_vlan_state state;
2648 struct sja1105_table *table;
2649 struct sja1105_rule *rule;
/* tc-flower VL (virtual link) rules depend on the current VLAN awareness;
 * refuse to change it while any are installed.
 */
2654 list_for_each_entry(rule, &priv->flow_block.rules, list) {
2655 if (rule->type == SJA1105_RULE_VL) {
2657 "Cannot change VLAN filtering with active VL rules\n");
2663 /* Enable VLAN filtering. */
2665 tpid2 = ETH_P_8021AD;
2667 /* Disable VLAN filtering. */
2668 tpid = ETH_P_SJA1105;
2669 tpid2 = ETH_P_SJA1105;
/* Per-port: choose the EtherType the tagger will transmit with. */
2672 for (port = 0; port < ds->num_ports; port++) {
2673 struct sja1105_port *sp = &priv->ports[port];
2676 sp->xmit_tpid = priv->info->qinq_tpid;
2678 sp->xmit_tpid = ETH_P_SJA1105;
2682 state = SJA1105_VLAN_UNAWARE;
2683 else if (priv->best_effort_vlan_filtering)
2684 state = SJA1105_VLAN_BEST_EFFORT;
2686 state = SJA1105_VLAN_FILTERING_FULL;
/* Nothing to do if the effective state is unchanged. */
2688 if (priv->vlan_state == state)
2691 priv->vlan_state = state;
2692 want_tagging = (state == SJA1105_VLAN_UNAWARE ||
2693 state == SJA1105_VLAN_BEST_EFFORT);
2695 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2696 general_params = table->entries;
2697 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
2698 general_params->tpid = tpid;
2699 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2700 general_params->tpid2 = tpid2;
2701 /* When VLAN filtering is on, we need to at least be able to
2702 * decode management traffic through the "backup plan".
2704 general_params->incl_srcpt1 = enabled;
2705 general_params->incl_srcpt0 = enabled;
2707 want_tagging = priv->best_effort_vlan_filtering || !enabled;
2709 /* VLAN filtering => independent VLAN learning.
2710 * No VLAN filtering (or best effort) => shared VLAN learning.
2712 * In shared VLAN learning mode, untagged traffic still gets
2713 * pvid-tagged, and the FDB table gets populated with entries
2714 * containing the "real" (pvid or from VLAN tag) VLAN ID.
2715 * However the switch performs a masked L2 lookup in the FDB,
2716 * effectively only looking up a frame's DMAC (and not VID) for the
2717 * forwarding decision.
2719 * This is extremely convenient for us, because in modes with
2720 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
2721 * each front panel port. This is good for identification but breaks
2722 * learning badly - the VID of the learnt FDB entry is unique, aka
2723 * no frames coming from any other port are going to have it. So
2724 * for forwarding purposes, this is as though learning was broken
2725 * (all frames get flooded).
2727 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
2728 l2_lookup_params = table->entries;
2729 l2_lookup_params->shared_learn = want_tagging;
/* NOTE(review): return value appears unchecked here — confirm against the
 * full source whether rc is assigned on an elided line.
 */
2731 sja1105_frame_memory_partitioning(priv);
/* notify=false: we are inside a switchdev transaction already. */
2733 rc = sja1105_build_vlan_table(priv, false);
2737 rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
2739 dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
2741 /* Switch port identification based on 802.1Q is only passable
2742 * if we are not under a vlan_filtering bridge. So make sure
2743 * the two configurations are mutually exclusive (of course, the
2744 * user may know better, i.e. best_effort_vlan_filtering).
2746 return sja1105_setup_8021q_tagging(ds, want_tagging);
2749 /* Returns number of VLANs added (0 or 1) on success,
2750 * or a negative error code.
/* Record a {port, vid, untagged, pvid} tuple on @vlan_list if not already
 * present. Returns 0 if it was a duplicate, 1 if a new entry was added
 * (callers use this to decide whether the VLAN table must be rebuilt), or a
 * negative errno (elided from this excerpt) on allocation failure.
 */
2752 static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2753 u16 flags, struct list_head *vlan_list)
2755 bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2756 bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2757 struct sja1105_bridge_vlan *v;
/* Exact duplicate (including flags) => nothing to do. */
2759 list_for_each_entry(v, vlan_list, list)
2760 if (v->port == port && v->vid == vid &&
2761 v->untagged == untagged && v->pvid == pvid)
2765 v = kzalloc(sizeof(*v), GFP_KERNEL);
2767 dev_err(ds->dev, "Out of memory while storing VLAN\n");
2773 v->untagged = untagged;
2775 list_add(&v->list, vlan_list);
2780 /* Returns number of VLANs deleted (0 or 1) */
/* Remove the {port, vid} entry from @vlan_list if present.
 * Returns 1 if an entry was deleted, 0 otherwise (per the comment above).
 */
2781 static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2782 struct list_head *vlan_list)
2784 struct sja1105_bridge_vlan *v, *n;
/* _safe iteration: the matched node is unlinked (and freed on an elided
 * line) while walking.
 */
2786 list_for_each_entry_safe(v, n, vlan_list, list) {
2787 if (v->port == port && v->vid == vid) {
/* DSA .port_vlan_add hook: store the bridge VLAN on priv->bridge_vlans and
 * rebuild the hardware VLAN table only if something actually changed.
 */
2797 static int sja1105_vlan_add(struct dsa_switch *ds, int port,
2798 const struct switchdev_obj_port_vlan *vlan)
2800 struct sja1105_private *priv = ds->priv;
2801 bool vlan_table_changed = false;
2804 /* If the user wants best-effort VLAN filtering (aka vlan_filtering
2805 * bridge plus tagging), be sure to at least deny alterations to the
2806 * configuration done by dsa_8021q.
2808 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
2809 vid_is_dsa_8021q(vlan->vid)) {
2810 dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
2814 rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
2815 &priv->bridge_vlans);
/* rc > 0 means a new entry was stored (checked on an elided line). */
2819 vlan_table_changed = true;
2821 if (!vlan_table_changed)
/* notify=true: propagate to cross-chip neighbours as well. */
2824 return sja1105_build_vlan_table(priv, true);
/* DSA .port_vlan_del hook: mirror image of sja1105_vlan_add() — drop the
 * bridge VLAN from the local list and rebuild the table only on change.
 */
2827 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2828 const struct switchdev_obj_port_vlan *vlan)
2830 struct sja1105_private *priv = ds->priv;
2831 bool vlan_table_changed = false;
2834 rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
/* rc > 0 means an entry was actually removed. */
2836 vlan_table_changed = true;
2838 if (!vlan_table_changed)
2841 return sja1105_build_vlan_table(priv, true);
/* tag_8021q callback: store a dsa_8021q VLAN on its dedicated list and
 * rebuild the hardware VLAN table.
 */
2844 static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
2847 struct sja1105_private *priv = ds->priv;
2850 rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
2854 return sja1105_build_vlan_table(priv, true);
/* tag_8021q callback: remove a dsa_8021q VLAN and rebuild the table. */
2857 static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
2859 struct sja1105_private *priv = ds->priv;
2862 rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
2866 return sja1105_build_vlan_table(priv, true);
/* Callbacks handed to the dsa_8021q context in sja1105_probe(). */
2869 static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
2870 .vlan_add = sja1105_dsa_8021q_vlan_add,
2871 .vlan_del = sja1105_dsa_8021q_vlan_del,
2874 /* The programming model for the SJA1105 switch is "all-at-once" via static
2875 * configuration tables. Some of these can be dynamically modified at runtime,
2876 * but not the xMII mode parameters table.
2877 * Furthermore, some PHYs may not have crystals for generating their clocks
2878 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
2879 * ref_clk pin. So port clocking needs to be initialized early, before
2880 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
2881 * Setting correct PHY link speed does not matter now.
2882 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
2883 * bindings are not yet parsed by DSA core. We need to parse early so that we
2884 * can populate the xMII mode parameters table.
/* DSA .setup hook: parse DT port bindings, register the PTP clock, load the
 * static configuration, set up port clocking (CGU), then enable 8021q
 * tagging (see the rationale comment above this function).
 * NOTE(review): excerpt is elided — rc checks/returns after each step are
 * only partially visible.
 */
2886 static int sja1105_setup(struct dsa_switch *ds)
2888 struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
2889 struct sja1105_private *priv = ds->priv;
2892 rc = sja1105_parse_dt(priv, ports);
2894 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2898 /* Error out early if internal delays are required through DT
2899 * and we can't apply them.
2901 rc = sja1105_parse_rgmii_delays(priv, ports);
2903 dev_err(ds->dev, "RGMII delay not supported\n");
/* PTP clock must exist before the static config references it. */
2907 rc = sja1105_ptp_clock_register(ds);
2909 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
2912 /* Create and send configuration down to device */
2913 rc = sja1105_static_config_load(priv, ports);
2915 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
2918 /* Configure the CGU (PHY link modes and speeds) */
2919 rc = sja1105_clocking_setup(priv);
2921 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
2924 /* On SJA1105, VLAN filtering per se is always enabled in hardware.
2925 * The only thing we can do to disable it is lie about what the 802.1Q
2927 * So it will still try to apply VLAN filtering, but all ingress
2928 * traffic (except frames received with EtherType of ETH_P_SJA1105)
2929 * will be internally tagged with a distorted VLAN header where the
2930 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
2932 ds->vlan_filtering_is_global = true;
2934 /* Advertise the 8 egress queues */
2935 ds->num_tx_queues = SJA1105_NUM_TC;
2937 ds->mtu_enforcement_ingress = true;
2939 rc = sja1105_devlink_setup(ds);
2943 /* The DSA/switchdev model brings up switch ports in standalone mode by
2944 * default, and that means vlan_filtering is 0 since they're not under
2945 * a bridge, so it's safe to set up switch tagging at this time.
2948 rc = sja1105_setup_8021q_tagging(ds, true);
/* DSA .teardown hook: destroy per-port xmit workers, unwind the subsystems
 * set up in probe/setup, and free both VLAN bookkeeping lists.
 */
2954 static void sja1105_teardown(struct dsa_switch *ds)
2956 struct sja1105_private *priv = ds->priv;
2957 struct sja1105_bridge_vlan *v, *n;
2960 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
2961 struct sja1105_port *sp = &priv->ports[port];
/* Only user ports got a worker in sja1105_probe(). */
2963 if (!dsa_is_user_port(ds, port))
2966 if (sp->xmit_worker)
2967 kthread_destroy_worker(sp->xmit_worker);
/* Reverse order of the corresponding *_setup() calls. */
2970 sja1105_devlink_teardown(ds);
2971 sja1105_flower_teardown(ds);
2972 sja1105_tas_teardown(ds);
2973 sja1105_ptp_clock_unregister(ds);
2974 sja1105_static_config_free(&priv->static_config);
/* Free both VLAN lists (entries are freed on elided lines). */
2976 list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
2981 list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
/* DSA .port_enable hook: strip NETIF_F_HW_VLAN_CTAG_FILTER from the slave
 * netdev of user ports, since VLAN filtering is global on this switch and
 * not offloadable per-port.
 */
2987 static int sja1105_port_enable(struct dsa_switch *ds, int port,
2988 struct phy_device *phy)
2990 struct net_device *slave;
2992 if (!dsa_is_user_port(ds, port))
2995 slave = dsa_to_port(ds, port)->slave;
2997 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
/* DSA .port_disable hook: stop any pending deferred-xmit work and drop all
 * frames still queued for the management route on this user port.
 */
3002 static void sja1105_port_disable(struct dsa_switch *ds, int port)
3004 struct sja1105_private *priv = ds->priv;
3005 struct sja1105_port *sp = &priv->ports[port];
3007 if (!dsa_is_user_port(ds, port))
3010 kthread_cancel_work_sync(&sp->xmit_work);
3011 skb_queue_purge(&sp->xmit_queue);
/* Transmit one management frame (e.g. PTP/STP to a specific port): install
 * a one-shot management route keyed on the frame's DMAC, send the skb via
 * the CPU port, then poll until the hardware consumes the route (ENFPORT
 * cleared — see the UM10944 quote below). Caller holds priv->mgmt_lock.
 * @takets requests a TX timestamp capture for the frame.
 */
3014 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
3015 struct sk_buff *skb, bool takets)
3017 struct sja1105_mgmt_entry mgmt_route = {0};
3018 struct sja1105_private *priv = ds->priv;
3025 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
3026 mgmt_route.destports = BIT(port);
3027 mgmt_route.enfport = 1;
3028 mgmt_route.tsreg = 0;
3029 mgmt_route.takets = takets;
/* Program the management route into dynamic-config slot @slot. */
3031 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3032 slot, &mgmt_route, true);
3038 /* Transfer skb to the host port. */
3039 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
3041 /* Wait until the switch has processed the frame */
3043 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
3046 dev_err_ratelimited(priv->ds->dev,
3047 "failed to poll for mgmt route\n");
3051 /* UM10944: The ENFPORT flag of the respective entry is
3052 * cleared when a match is found. The host can use this
3053 * flag as an acknowledgment.
3056 } while (mgmt_route.enfport && --timeout);
/* Timed out: invalidate the route so a later frame can't match it. */
3059 /* Clean up the management route so that a follow-up
3060 * frame may not match on it by mistake.
3061 * This is only hardware supported on P/Q/R/S - on E/T it is
3062 * a no-op and we are silently discarding the -EOPNOTSUPP.
3064 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3065 slot, &mgmt_route, false);
3066 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
3069 return NETDEV_TX_OK;
/* container_of helpers: recover the sja1105_port from its embedded
 * kthread_work, and the sja1105_private from its embedded tagger_data.
 */
3072 #define work_to_port(work) \
3073 container_of((work), struct sja1105_port, xmit_work)
3074 #define tagger_to_sja1105(t) \
3075 container_of((t), struct sja1105_private, tagger_data)
3077 /* Deferred work is unfortunately necessary because setting up the management
3078 * route cannot be done from atomic context (SPI transfer takes a sleepable
/* kthread worker function: drain the port's xmit_queue, sending each frame
 * through a management route under priv->mgmt_lock (see the sleepable-SPI
 * rationale in the comment above). A clone made by dsa_skb_tx_timestamp
 * signals that a TX timestamp is wanted.
 */
3081 static void sja1105_port_deferred_xmit(struct kthread_work *work)
3083 struct sja1105_port *sp = work_to_port(work);
3084 struct sja1105_tagger_data *tagger_data = sp->data;
3085 struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
/* Port index from pointer arithmetic into the priv->ports array. */
3086 int port = sp - priv->ports;
3087 struct sk_buff *skb;
3089 while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
3090 struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
3092 mutex_lock(&priv->mgmt_lock);
/* Always slot 0; serialized by mgmt_lock. takets when a clone exists. */
3094 sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
3096 /* The clone, if there, was made by dsa_skb_tx_timestamp */
3098 sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
3100 mutex_unlock(&priv->mgmt_lock);
3104 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3105 * which cannot be reconfigured at runtime. So a switch reset is required.
/* DSA .set_ageing_time hook: convert ms to the hardware MAXAGE unit and
 * reload the static config only if the value actually changed (reload is a
 * switch reset — see comment above this function).
 */
3107 static int sja1105_set_ageing_time(struct dsa_switch *ds,
3108 unsigned int ageing_time)
3110 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3111 struct sja1105_private *priv = ds->priv;
3112 struct sja1105_table *table;
3113 unsigned int maxage;
3115 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3116 l2_lookup_params = table->entries;
3118 maxage = SJA1105_AGEING_TIME_MS(ageing_time);
/* Avoid a needless (disruptive) static-config reload. */
3120 if (l2_lookup_params->maxage == maxage)
3123 l2_lookup_params->maxage = maxage;
3125 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
/* DSA .port_change_mtu hook: MTU is enforced via the L2 policing MAXLEN
 * field, so account for L2 overhead (and one extra VLAN tag on the CPU
 * port, which carries the 8021q tag) before programming it.
 */
3128 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3130 struct sja1105_l2_policing_entry *policing;
3131 struct sja1105_private *priv = ds->priv;
3133 new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3135 if (dsa_is_cpu_port(ds, port))
3136 new_mtu += VLAN_HLEN;
3138 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
/* Skip the static-config reload when nothing changes. */
3140 if (policing[port].maxlen == new_mtu)
3143 policing[port].maxlen = new_mtu;
3145 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
/* DSA .port_max_mtu hook: 2043 is the hardware frame-length limit; subtract
 * the L2 overhead added back in sja1105_change_mtu().
 */
3148 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
3150 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
/* DSA .port_setup_tc hook: dispatch supported qdisc offloads (taprio for
 * the TAS, CBS shapers); anything else falls to the elided default case.
 */
3153 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3154 enum tc_setup_type type,
3158 case TC_SETUP_QDISC_TAPRIO:
3159 return sja1105_setup_tc_taprio(ds, port, type_data);
3160 case TC_SETUP_QDISC_CBS:
3161 return sja1105_setup_tc_cbs(ds, port, type_data);
3167 /* We have a single mirror (@to) port, but can configure ingress and egress
3168 * mirroring on all other (@from) ports.
3169 * We need to allow mirroring rules only as long as the @to port is always the
3170 * same, and we need to unset the @to port from mirr_port only when there is no
3171 * mirroring rule that references it.
/* Enable/disable ingress or egress mirroring of @from towards @to, keeping
 * the single global mirr_port consistent (see the comment block above):
 * reject a second distinct @to while rules exist, and release mirr_port
 * (set back to the SJA1105_NUM_PORTS sentinel) once no port references it.
 */
3173 static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
3174 bool ingress, bool enabled)
3176 struct sja1105_general_params_entry *general_params;
3177 struct sja1105_mac_config_entry *mac;
3178 struct sja1105_table *table;
3179 bool already_enabled;
3183 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
3184 general_params = table->entries;
3186 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
/* mirr_port == SJA1105_NUM_PORTS is the "mirroring off" sentinel. */
3188 already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
3189 if (already_enabled && enabled && general_params->mirr_port != to) {
3190 dev_err(priv->ds->dev,
3191 "Delete mirroring rules towards port %llu first\n",
3192 general_params->mirr_port);
3201 /* Anybody still referencing mirr_port? */
3202 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3203 if (mac[port].ing_mirr || mac[port].egr_mirr) {
3208 /* Unset already_enabled for next time */
3210 new_mirr_port = SJA1105_NUM_PORTS;
/* Push mirr_port to hardware only when it changes. */
3212 if (new_mirr_port != general_params->mirr_port) {
3213 general_params->mirr_port = new_mirr_port;
3215 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
3216 0, general_params, true);
/* Finally flip the per-port ingress/egress mirror bit on @from. */
3222 mac[from].ing_mirr = enabled;
3224 mac[from].egr_mirr = enabled;
3226 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
/* DSA .port_mirror_add hook: thin wrapper enabling a mirror rule. */
3230 static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3231 struct dsa_mall_mirror_tc_entry *mirror,
3234 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
/* DSA .port_mirror_del hook: disable the rule (void hook — any error from
 * sja1105_mirror_apply() cannot be propagated).
 */
3238 static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3239 struct dsa_mall_mirror_tc_entry *mirror)
3241 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3242 mirror->ingress, false);
/* DSA .port_policer_add hook: translate the tc policer rate/burst into the
 * L2 policing table's RATE/SMAX fields and reload.
 */
3245 static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3246 struct dsa_mall_policer_tc_entry *policer)
3248 struct sja1105_l2_policing_entry *policing;
3249 struct sja1105_private *priv = ds->priv;
3251 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3253 /* In hardware, every 8 microseconds the credit level is incremented by
3254 * the value of RATE bytes divided by 64, up to a maximum of SMAX
/* 512 = 64 * 8us-per-ms scaling; divisor is on an elided line. */
3257 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3259 policing[port].smax = policer->burst;
3261 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
/* DSA .port_policer_del hook: restore the unpoliced defaults (line rate,
 * maximum burst) and reload. Void hook, so the reload's rc is dropped.
 */
3264 static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3266 struct sja1105_l2_policing_entry *policing;
3267 struct sja1105_private *priv = ds->priv;
3269 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3271 policing[port].rate = SJA1105_RATE_MBPS(1000);
3272 policing[port].smax = 65535;
3274 sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
/* Enable/disable address learning on @port via MAC config DYN_LEARN and
 * mirror the result into the priv->learn_ena bitmask.
 * NOTE(review): dyn_learn is computed from the *current* learn_ena bit while
 * the mask is updated afterwards — ordering relies on elided lines; confirm
 * against the full source.
 */
3277 static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
3280 struct sja1105_mac_config_entry *mac;
3283 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3285 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
3287 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
3293 priv->learn_ena |= BIT(port);
3295 priv->learn_ena &= ~BIT(port);
3300 /* Common function for unicast and broadcast flood configuration.
3301 * Flooding is configured between each {ingress, egress} port pair, and since
3302 * the bridge's semantics are those of "egress flooding", it means we must
3303 * enable flooding towards this port from all ingress ports that are in the
3304 * same bridge. In practice, we just enable flooding from all possible ingress
3305 * ports regardless of whether they're in the same bridge or not, since the
3306 * reach_port configuration will not allow flooded frames to leak across
3307 * bridging domains anyway.
/* Apply BR_FLOOD (unknown unicast, fl_domain) and BR_BCAST_FLOOD
 * (broadcast, bc_domain) towards egress port @to, from every used ingress
 * port — see the "egress flooding" semantics comment above this function.
 */
3309 static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
3310 struct switchdev_brport_flags flags)
3312 struct sja1105_l2_forwarding_entry *l2_fwd;
3315 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
3317 for (from = 0; from < priv->ds->num_ports; from++) {
3318 if (dsa_is_unused_port(priv->ds, from))
/* Only touch a domain whose flag is present in flags.mask. */
3324 if (flags.mask & BR_FLOOD) {
3325 if (flags.val & BR_FLOOD)
3326 l2_fwd[from].fl_domain |= BIT(to);
3328 l2_fwd[from].fl_domain &= ~BIT(to);
3331 if (flags.mask & BR_BCAST_FLOOD) {
3332 if (flags.val & BR_BCAST_FLOOD)
3333 l2_fwd[from].bc_domain |= BIT(to);
3335 l2_fwd[from].bc_domain &= ~BIT(to);
/* Commit each modified per-ingress-port L2 forwarding entry. */
3338 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
3339 from, &l2_fwd[from], true);
/* Apply BR_MCAST_FLOOD towards @to by editing the DESTPORTS of the
 * catch-all L2 lookup entry that matches any multicast DMAC
 * (macaddr == mask == SJA1105_UNKNOWN_MULTICAST, i.e. only the group bit).
 */
3347 static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
3348 struct switchdev_brport_flags flags,
3349 struct netlink_ext_ack *extack)
3351 struct sja1105_l2_lookup_entry *l2_lookup;
3352 struct sja1105_table *table;
3355 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
3356 l2_lookup = table->entries;
/* Locate the unknown-multicast catch-all entry. */
3358 for (match = 0; match < table->entry_count; match++)
3359 if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
3360 l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
3363 if (match == table->entry_count) {
3364 NL_SET_ERR_MSG_MOD(extack,
3365 "Could not find FDB entry for unknown multicast");
3369 if (flags.val & BR_MCAST_FLOOD)
3370 l2_lookup[match].destports |= BIT(to);
3372 l2_lookup[match].destports &= ~BIT(to);
3374 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
3375 l2_lookup[match].index,
/* DSA .port_pre_bridge_flags hook: validate (without applying) the
 * requested brport flags. Rejects unsupported flag bits, and on chips that
 * cannot limit multicast flooding independently, rejects setting BR_FLOOD
 * and BR_MCAST_FLOOD to different values.
 */
3380 static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
3381 struct switchdev_brport_flags flags,
3382 struct netlink_ext_ack *extack)
3384 struct sja1105_private *priv = ds->priv;
3386 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
3390 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
3391 !priv->info->can_limit_mcast_flood) {
3392 bool multicast = !!(flags.val & BR_MCAST_FLOOD);
3393 bool unicast = !!(flags.val & BR_FLOOD);
3395 if (unicast != multicast) {
3396 NL_SET_ERR_MSG_MOD(extack,
3397 "This chip cannot configure multicast flooding independently of unicast");
/* DSA .port_bridge_flags hook: apply the pre-validated brport flags —
 * learning, unicast/broadcast flood, and (where supported) independent
 * multicast flood.
 */
3405 static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
3406 struct switchdev_brport_flags flags,
3407 struct netlink_ext_ack *extack)
3409 struct sja1105_private *priv = ds->priv;
3412 if (flags.mask & BR_LEARNING) {
3413 bool learn_ena = !!(flags.val & BR_LEARNING);
3415 rc = sja1105_port_set_learning(priv, port, learn_ena);
3420 if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
3421 rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
3426 /* For chips that can't offload BR_MCAST_FLOOD independently, there
3427 * is nothing to do here, we ensured the configuration is in sync by
3428 * offloading BR_FLOOD.
3430 if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
3431 rc = sja1105_port_mcast_flood(priv, port, flags,
/* DSA switch operations table, installed as ds->ops in sja1105_probe().
 * (Forward-declared near the top of the file.)
 */
3440 static const struct dsa_switch_ops sja1105_switch_ops = {
3441 .get_tag_protocol = sja1105_get_tag_protocol,
3442 .setup = sja1105_setup,
3443 .teardown = sja1105_teardown,
3444 .set_ageing_time = sja1105_set_ageing_time,
3445 .port_change_mtu = sja1105_change_mtu,
3446 .port_max_mtu = sja1105_get_max_mtu,
3447 .phylink_validate = sja1105_phylink_validate,
3448 .phylink_mac_link_state = sja1105_mac_pcs_get_state,
3449 .phylink_mac_config = sja1105_mac_config,
3450 .phylink_mac_link_up = sja1105_mac_link_up,
3451 .phylink_mac_link_down = sja1105_mac_link_down,
3452 .get_strings = sja1105_get_strings,
3453 .get_ethtool_stats = sja1105_get_ethtool_stats,
3454 .get_sset_count = sja1105_get_sset_count,
3455 .get_ts_info = sja1105_get_ts_info,
3456 .port_enable = sja1105_port_enable,
3457 .port_disable = sja1105_port_disable,
3458 .port_fdb_dump = sja1105_fdb_dump,
3459 .port_fdb_add = sja1105_fdb_add,
3460 .port_fdb_del = sja1105_fdb_del,
3461 .port_bridge_join = sja1105_bridge_join,
3462 .port_bridge_leave = sja1105_bridge_leave,
3463 .port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
3464 .port_bridge_flags = sja1105_port_bridge_flags,
3465 .port_stp_state_set = sja1105_bridge_stp_state_set,
3466 .port_vlan_filtering = sja1105_vlan_filtering,
3467 .port_vlan_add = sja1105_vlan_add,
3468 .port_vlan_del = sja1105_vlan_del,
3469 .port_mdb_add = sja1105_mdb_add,
3470 .port_mdb_del = sja1105_mdb_del,
3471 .port_hwtstamp_get = sja1105_hwtstamp_get,
3472 .port_hwtstamp_set = sja1105_hwtstamp_set,
3473 .port_rxtstamp = sja1105_port_rxtstamp,
3474 .port_txtstamp = sja1105_port_txtstamp,
3475 .port_setup_tc = sja1105_port_setup_tc,
3476 .port_mirror_add = sja1105_mirror_add,
3477 .port_mirror_del = sja1105_mirror_del,
3478 .port_policer_add = sja1105_port_policer_add,
3479 .port_policer_del = sja1105_port_policer_del,
3480 .cls_flower_add = sja1105_cls_flower_add,
3481 .cls_flower_del = sja1105_cls_flower_del,
3482 .cls_flower_stats = sja1105_cls_flower_stats,
3483 .crosschip_bridge_join = sja1105_crosschip_bridge_join,
3484 .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
3485 .devlink_param_get = sja1105_devlink_param_get,
3486 .devlink_param_set = sja1105_devlink_param_set,
3487 .devlink_info_get = sja1105_devlink_info_get,
/* Forward declaration: sja1105_check_device_id() walks this match table,
 * which is defined at the bottom of the file.
 */
3490 static const struct of_device_id sja1105_dt_ids[];
/* Read the chip's device ID and part number over SPI and cross-check them
 * against the OF match table. A mismatch with the DT-declared compatible is
 * tolerated with a warning (priv->info is corrected on an elided line); a
 * chip matching no table entry at all is an error.
 */
3492 static int sja1105_check_device_id(struct sja1105_private *priv)
3494 const struct sja1105_regs *regs = priv->info->regs;
3495 u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
3496 struct device *dev = &priv->spidev->dev;
3497 const struct of_device_id *match;
3502 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
3507 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
3508 SJA1105_SIZE_DEVICE_ID);
/* PART_NO is a 4-bit field at bit offset 19 of the product ID word. */
3512 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
3514 for (match = sja1105_dt_ids; match->compatible[0]; match++) {
3515 const struct sja1105_info *info = match->data;
3517 /* Is what's been probed in our match table at all? */
3518 if (info->device_id != device_id || info->part_no != part_no)
3521 /* But is it what's in the device tree? */
3522 if (priv->info->device_id != device_id ||
3523 priv->info->part_no != part_no) {
3524 dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
3525 priv->info->name, info->name);
3526 /* It isn't. No problem, pick that up. */
/* Fell through the whole table without a match. */
3533 dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
3534 device_id, part_no);
/* SPI probe: reset the chip via the optional reset GPIO, configure the SPI
 * bus, verify the silicon against the DT compatible, allocate and wire up
 * the dsa_switch / dsa_8021q context / per-port state, register with DSA,
 * and spawn one deferred-xmit kthread worker per user port.
 * NOTE(review): excerpt is elided — several error checks/returns and the
 * failure-path labels are only partially visible.
 */
3539 static int sja1105_probe(struct spi_device *spi)
3541 struct sja1105_tagger_data *tagger_data;
3542 struct device *dev = &spi->dev;
3543 struct sja1105_private *priv;
3544 struct dsa_switch *ds;
3547 if (!dev->of_node) {
3548 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
3552 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
3556 /* Configure the optional reset pin and bring up switch */
3557 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
3558 if (IS_ERR(priv->reset_gpio))
3559 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
/* 1 ms pulse, 1 ms startup delay (no-op if the GPIO is absent/err). */
3561 sja1105_hw_reset(priv->reset_gpio, 1, 1);
3563 /* Populate our driver private structure (priv) based on
3564 * the device tree node that was probed (spi)
3567 spi_set_drvdata(spi, priv);
3569 /* Configure the SPI bus */
3570 spi->bits_per_word = 8;
3571 rc = spi_setup(spi);
3573 dev_err(dev, "Could not init SPI\n");
3577 priv->info = of_device_get_match_data(dev);
3579 /* Detect hardware device */
3580 rc = sja1105_check_device_id(priv);
3582 dev_err(dev, "Device ID check failed: %d\n", rc);
3586 dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
3588 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3593 ds->num_ports = SJA1105_NUM_PORTS;
3594 ds->ops = &sja1105_switch_ops;
3598 tagger_data = &priv->tagger_data;
3600 mutex_init(&priv->ptp_data.lock);
3601 mutex_init(&priv->mgmt_lock);
/* dsa_8021q context: VLAN-based port identification callbacks + proto. */
3603 priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
3605 if (!priv->dsa_8021q_ctx)
3608 priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
3609 priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
3610 priv->dsa_8021q_ctx->ds = ds;
3612 INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
3613 INIT_LIST_HEAD(&priv->bridge_vlans);
3614 INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
3616 sja1105_tas_setup(ds);
3617 sja1105_flower_setup(ds);
3619 rc = dsa_register_switch(priv->ds);
/* CBS shaper bookkeeping, only if the CBS qdisc can be offloaded. */
3623 if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
3624 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
3625 sizeof(struct sja1105_cbs_entry),
3631 /* Connections between dsa_port and sja1105_port */
3632 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3633 struct sja1105_port *sp = &priv->ports[port];
3634 struct dsa_port *dp = dsa_to_port(ds, port);
3635 struct net_device *slave;
3638 if (!dsa_is_user_port(ds, port))
3643 sp->data = tagger_data;
3645 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
3646 sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
3648 if (IS_ERR(sp->xmit_worker)) {
3649 rc = PTR_ERR(sp->xmit_worker);
3651 "failed to create deferred xmit thread: %d\n",
3655 skb_queue_head_init(&sp->xmit_queue);
3656 sp->xmit_tpid = ETH_P_SJA1105;
/* All subvlan slots start unmapped (VLAN_N_VID sentinel). */
3658 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
3659 sp->subvlan_map[subvlan] = VLAN_N_VID;
/* Error unwind: destroy workers created for earlier user ports. */
3664 while (port-- > 0) {
3665 struct sja1105_port *sp = &priv->ports[port];
3667 if (!dsa_is_user_port(ds, port))
3670 kthread_destroy_worker(sp->xmit_worker);
/* SPI remove: unregister from DSA, which invokes sja1105_teardown() for the
 * rest of the cleanup (devm handles the allocations).
 */
3675 static int sja1105_remove(struct spi_device *spi)
3677 struct sja1105_private *priv = spi_get_drvdata(spi);
3679 dsa_unregister_switch(priv->ds);
/* OF match table: one entry per chip variant, with its sja1105_info
 * (device ID, part number, register layout) as match data. Also consulted
 * directly by sja1105_check_device_id().
 */
3683 static const struct of_device_id sja1105_dt_ids[] = {
3684 { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
3685 { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
3686 { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
3687 { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
3688 { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
3689 { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
3692 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
/* SPI driver registration (module init/exit generated by
 * module_spi_driver()).
 */
3694 static struct spi_driver sja1105_driver = {
3697 .owner = THIS_MODULE,
3698 .of_match_table = of_match_ptr(sja1105_dt_ids),
3700 .probe = sja1105_probe,
3701 .remove = sja1105_remove,
3704 module_spi_driver(sja1105_driver);
/* Module metadata. */
3706 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
3707 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
3708 MODULE_DESCRIPTION("SJA1105 Driver");
3709 MODULE_LICENSE("GPL v2");