1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6 * Copyright (c) 2016 John Crispin <john@phrozen.org>
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/if_bridge.h>
19 #include <linux/mdio.h>
20 #include <linux/phylink.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/etherdevice.h>
/* Build one qca8k_mib_desc initializer: _s = size in 32bit words,
 * _o = register offset inside a port's MIB block, _n = counter name.
 */
#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}
33 static const struct qca8k_mib_desc ar8327_mib[] = {
34 MIB_DESC(1, 0x00, "RxBroad"),
35 MIB_DESC(1, 0x04, "RxPause"),
36 MIB_DESC(1, 0x08, "RxMulti"),
37 MIB_DESC(1, 0x0c, "RxFcsErr"),
38 MIB_DESC(1, 0x10, "RxAlignErr"),
39 MIB_DESC(1, 0x14, "RxRunt"),
40 MIB_DESC(1, 0x18, "RxFragment"),
41 MIB_DESC(1, 0x1c, "Rx64Byte"),
42 MIB_DESC(1, 0x20, "Rx128Byte"),
43 MIB_DESC(1, 0x24, "Rx256Byte"),
44 MIB_DESC(1, 0x28, "Rx512Byte"),
45 MIB_DESC(1, 0x2c, "Rx1024Byte"),
46 MIB_DESC(1, 0x30, "Rx1518Byte"),
47 MIB_DESC(1, 0x34, "RxMaxByte"),
48 MIB_DESC(1, 0x38, "RxTooLong"),
49 MIB_DESC(2, 0x3c, "RxGoodByte"),
50 MIB_DESC(2, 0x44, "RxBadByte"),
51 MIB_DESC(1, 0x4c, "RxOverFlow"),
52 MIB_DESC(1, 0x50, "Filtered"),
53 MIB_DESC(1, 0x54, "TxBroad"),
54 MIB_DESC(1, 0x58, "TxPause"),
55 MIB_DESC(1, 0x5c, "TxMulti"),
56 MIB_DESC(1, 0x60, "TxUnderRun"),
57 MIB_DESC(1, 0x64, "Tx64Byte"),
58 MIB_DESC(1, 0x68, "Tx128Byte"),
59 MIB_DESC(1, 0x6c, "Tx256Byte"),
60 MIB_DESC(1, 0x70, "Tx512Byte"),
61 MIB_DESC(1, 0x74, "Tx1024Byte"),
62 MIB_DESC(1, 0x78, "Tx1518Byte"),
63 MIB_DESC(1, 0x7c, "TxMaxByte"),
64 MIB_DESC(1, 0x80, "TxOverSize"),
65 MIB_DESC(2, 0x84, "TxByte"),
66 MIB_DESC(1, 0x8c, "TxCollision"),
67 MIB_DESC(1, 0x90, "TxAbortCol"),
68 MIB_DESC(1, 0x94, "TxMultiCol"),
69 MIB_DESC(1, 0x98, "TxSingleCol"),
70 MIB_DESC(1, 0x9c, "TxExcDefer"),
71 MIB_DESC(1, 0xa0, "TxDefer"),
72 MIB_DESC(1, 0xa4, "TxLateCol"),
73 MIB_DESC(1, 0xa8, "RXUnicast"),
74 MIB_DESC(1, 0xac, "TXUnicast"),
77 /* The 32bit switch registers are accessed indirectly. To achieve this we need
78 * to set the page of the register. Track the last page that was set to reduce
81 static u16 qca8k_current_page = 0xffff;
84 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
93 *page = regaddr & 0x3ff;
97 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
101 ret = bus->read(bus, phy_id, regnum);
104 ret = bus->read(bus, phy_id, regnum + 1);
109 dev_err_ratelimited(&bus->dev,
110 "failed to read qca8k 32bit register\n");
119 qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
125 hi = (u16)(val >> 16);
127 ret = bus->write(bus, phy_id, regnum, lo);
129 ret = bus->write(bus, phy_id, regnum + 1, hi);
131 dev_err_ratelimited(&bus->dev,
132 "failed to write qca8k 32bit register\n");
136 qca8k_set_page(struct mii_bus *bus, u16 page)
140 if (page == qca8k_current_page)
143 ret = bus->write(bus, 0x18, 0, page);
145 dev_err_ratelimited(&bus->dev,
146 "failed to set qca8k page\n");
150 qca8k_current_page = page;
151 usleep_range(1000, 2000);
156 qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
158 return regmap_read(priv->regmap, reg, val);
162 qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
164 return regmap_write(priv->regmap, reg, val);
168 qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
170 return regmap_update_bits(priv->regmap, reg, mask, write_val);
174 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
176 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
177 struct mii_bus *bus = priv->bus;
181 qca8k_split_addr(reg, &r1, &r2, &page);
183 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
185 ret = qca8k_set_page(bus, page);
189 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
192 mutex_unlock(&bus->mdio_lock);
197 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
199 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
200 struct mii_bus *bus = priv->bus;
204 qca8k_split_addr(reg, &r1, &r2, &page);
206 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
208 ret = qca8k_set_page(bus, page);
212 qca8k_mii_write32(bus, 0x10 | r2, r1, val);
215 mutex_unlock(&bus->mdio_lock);
220 qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
222 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
223 struct mii_bus *bus = priv->bus;
228 qca8k_split_addr(reg, &r1, &r2, &page);
230 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
232 ret = qca8k_set_page(bus, page);
236 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
242 qca8k_mii_write32(bus, 0x10 | r2, r1, val);
245 mutex_unlock(&bus->mdio_lock);
250 static const struct regmap_range qca8k_readable_ranges[] = {
251 regmap_reg_range(0x0000, 0x00e4), /* Global control */
252 regmap_reg_range(0x0100, 0x0168), /* EEE control */
253 regmap_reg_range(0x0200, 0x0270), /* Parser control */
254 regmap_reg_range(0x0400, 0x0454), /* ACL */
255 regmap_reg_range(0x0600, 0x0718), /* Lookup */
256 regmap_reg_range(0x0800, 0x0b70), /* QM */
257 regmap_reg_range(0x0c00, 0x0c80), /* PKT */
258 regmap_reg_range(0x0e00, 0x0e98), /* L3 */
259 regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
260 regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
261 regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
262 regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
263 regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
264 regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
265 regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
269 static const struct regmap_access_table qca8k_readable_table = {
270 .yes_ranges = qca8k_readable_ranges,
271 .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
274 static struct regmap_config qca8k_regmap_config = {
278 .max_register = 0x16ac, /* end MIB - Port6 range */
279 .reg_read = qca8k_regmap_read,
280 .reg_write = qca8k_regmap_write,
281 .reg_update_bits = qca8k_regmap_update_bits,
282 .rd_table = &qca8k_readable_table,
283 .disable_locking = true, /* Locking is handled by qca8k read/write */
284 .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
288 qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
292 return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
293 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
297 qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
302 /* load the ARL table into an array */
303 for (i = 0; i < 4; i++) {
304 ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
312 fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
314 fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
315 /* portmask - 54:48 */
316 fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
318 fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
319 fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
320 fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
321 fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
322 fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
323 fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
329 qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
336 reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
338 reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
339 /* portmask - 54:48 */
340 reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
342 reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
343 reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
344 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
345 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
346 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
347 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
349 /* load the array into the ARL table */
350 for (i = 0; i < 3; i++)
351 qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
355 qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
360 /* Set the command and FDB index */
361 reg = QCA8K_ATU_FUNC_BUSY;
364 reg |= QCA8K_ATU_FUNC_PORT_EN;
365 reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
368 /* Write the function register triggering the table access */
369 ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
373 /* wait for completion */
374 ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
378 /* Check for table full violation when adding an entry */
379 if (cmd == QCA8K_FDB_LOAD) {
380 ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®);
383 if (reg & QCA8K_ATU_FUNC_FULL)
391 qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
395 qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
396 ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
400 return qca8k_fdb_read(priv, fdb);
404 qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
409 mutex_lock(&priv->reg_mutex);
410 qca8k_fdb_write(priv, vid, port_mask, mac, aging);
411 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
412 mutex_unlock(&priv->reg_mutex);
418 qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
422 mutex_lock(&priv->reg_mutex);
423 qca8k_fdb_write(priv, vid, port_mask, mac, 0);
424 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
425 mutex_unlock(&priv->reg_mutex);
431 qca8k_fdb_flush(struct qca8k_priv *priv)
433 mutex_lock(&priv->reg_mutex);
434 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
435 mutex_unlock(&priv->reg_mutex);
439 qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
440 const u8 *mac, u16 vid)
442 struct qca8k_fdb fdb = { 0 };
445 mutex_lock(&priv->reg_mutex);
447 qca8k_fdb_write(priv, vid, 0, mac, 0);
448 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
452 ret = qca8k_fdb_read(priv, &fdb);
456 /* Rule exist. Delete first */
458 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
463 /* Add port to fdb portmask */
464 fdb.port_mask |= port_mask;
466 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
467 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
470 mutex_unlock(&priv->reg_mutex);
475 qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
476 const u8 *mac, u16 vid)
478 struct qca8k_fdb fdb = { 0 };
481 mutex_lock(&priv->reg_mutex);
483 qca8k_fdb_write(priv, vid, 0, mac, 0);
484 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
488 /* Rule doesn't exist. Why delete? */
494 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
498 /* Only port in the rule is this port. Don't re insert */
499 if (fdb.port_mask == port_mask)
502 /* Remove port from port mask */
503 fdb.port_mask &= ~port_mask;
505 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
506 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
509 mutex_unlock(&priv->reg_mutex);
514 qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
519 /* Set the command and VLAN index */
520 reg = QCA8K_VTU_FUNC1_BUSY;
522 reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
524 /* Write the function register triggering the table access */
525 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
529 /* wait for completion */
530 ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
534 /* Check for table full violation when adding an entry */
535 if (cmd == QCA8K_VLAN_LOAD) {
536 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®);
539 if (reg & QCA8K_VTU_FUNC1_FULL)
547 qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
553 We do the right thing with VLAN 0 and treat it as untagged while
554 preserving the tag on egress.
559 mutex_lock(&priv->reg_mutex);
560 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
564 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
567 reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
568 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
570 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
572 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
574 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
577 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
580 mutex_unlock(&priv->reg_mutex);
586 qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
592 mutex_lock(&priv->reg_mutex);
593 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
597 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
600 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
601 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
603 /* Check if we're the last member to be removed */
605 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
606 mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
608 if ((reg & mask) != mask) {
615 ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
617 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
620 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
624 mutex_unlock(&priv->reg_mutex);
630 qca8k_mib_init(struct qca8k_priv *priv)
634 mutex_lock(&priv->reg_mutex);
635 ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
639 ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
643 ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
647 ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
650 mutex_unlock(&priv->reg_mutex);
655 qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
657 u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
659 /* Port 0 and 6 have no internal PHY */
660 if (port > 0 && port < 6)
661 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
664 regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
666 regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
/* Map a switch port number to the MDIO address of its internal PHY.
 * Port 0 has no internal phy.
 * Port 1 has an internal PHY at MDIO address 0.
 * Port 2 has an internal PHY at MDIO address 1.
 * ...
 * Port 5 has an internal PHY at MDIO address 4.
 * Port 6 has no internal PHY.
 */
static int
qca8k_port_to_phy(int port)
{
	return port - 1;
}
685 qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
691 qca8k_split_addr(reg, &r1, &r2, &page);
693 ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
694 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
695 bus, 0x10 | r2, r1, &val);
697 /* Check if qca8k_read has failed for a different reason
698 * before returnting -ETIMEDOUT
700 if (ret < 0 && ret1 < 0)
707 qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
713 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
716 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
717 QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
718 QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
719 QCA8K_MDIO_MASTER_DATA(data);
721 qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
723 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
725 ret = qca8k_set_page(bus, page);
729 qca8k_mii_write32(bus, 0x10 | r2, r1, val);
731 ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
732 QCA8K_MDIO_MASTER_BUSY);
735 /* even if the busy_wait timeouts try to clear the MASTER_EN */
736 qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
738 mutex_unlock(&bus->mdio_lock);
744 qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
750 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
753 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
754 QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
755 QCA8K_MDIO_MASTER_REG_ADDR(regnum);
757 qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
759 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
761 ret = qca8k_set_page(bus, page);
765 qca8k_mii_write32(bus, 0x10 | r2, r1, val);
767 ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
768 QCA8K_MDIO_MASTER_BUSY);
772 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
775 /* even if the busy_wait timeouts try to clear the MASTER_EN */
776 qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
778 mutex_unlock(&bus->mdio_lock);
781 ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
787 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
789 struct qca8k_priv *priv = slave_bus->priv;
790 struct mii_bus *bus = priv->bus;
792 return qca8k_mdio_write(bus, phy, regnum, data);
796 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
798 struct qca8k_priv *priv = slave_bus->priv;
799 struct mii_bus *bus = priv->bus;
801 return qca8k_mdio_read(bus, phy, regnum);
805 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
807 struct qca8k_priv *priv = ds->priv;
809 /* Check if the legacy mapping should be used and the
810 * port is not correctly mapped to the right PHY in the
813 if (priv->legacy_phy_port_mapping)
814 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
816 return qca8k_mdio_write(priv->bus, port, regnum, data);
820 qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
822 struct qca8k_priv *priv = ds->priv;
825 /* Check if the legacy mapping should be used and the
826 * port is not correctly mapped to the right PHY in the
829 if (priv->legacy_phy_port_mapping)
830 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
832 ret = qca8k_mdio_read(priv->bus, port, regnum);
841 qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
843 struct dsa_switch *ds = priv->ds;
846 bus = devm_mdiobus_alloc(ds->dev);
851 bus->priv = (void *)priv;
852 bus->name = "qca8k slave mii";
853 bus->read = qca8k_internal_mdio_read;
854 bus->write = qca8k_internal_mdio_write;
855 snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
858 bus->parent = ds->dev;
859 bus->phy_mask = ~ds->phys_mii_mask;
861 ds->slave_mii_bus = bus;
863 return devm_of_mdiobus_register(priv->dev, bus, mdio);
867 qca8k_setup_mdio_bus(struct qca8k_priv *priv)
869 u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
870 struct device_node *ports, *port, *mdio;
871 phy_interface_t mode;
874 ports = of_get_child_by_name(priv->dev->of_node, "ports");
876 ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
881 for_each_available_child_of_node(ports, port) {
882 err = of_property_read_u32(port, "reg", ®);
889 if (!dsa_is_user_port(priv->ds, reg))
892 of_get_phy_mode(port, &mode);
894 if (of_property_read_bool(port, "phy-handle") &&
895 mode != PHY_INTERFACE_MODE_INTERNAL)
896 external_mdio_mask |= BIT(reg);
898 internal_mdio_mask |= BIT(reg);
902 if (!external_mdio_mask && !internal_mdio_mask) {
903 dev_err(priv->dev, "no PHYs are defined.\n");
907 /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
908 * the MDIO_MASTER register also _disconnects_ the external MDC
909 * passthrough to the internal PHYs. It's not possible to use both
910 * configurations at the same time!
912 * Because this came up during the review process:
913 * If the external mdio-bus driver is capable magically disabling
914 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
915 * accessors for the time being, it would be possible to pull this
918 if (!!external_mdio_mask && !!internal_mdio_mask) {
919 dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
923 if (external_mdio_mask) {
924 /* Make sure to disable the internal mdio bus in cases
925 * a dt-overlay and driver reload changed the configuration
928 return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
929 QCA8K_MDIO_MASTER_EN);
932 /* Check if the devicetree declare the port:phy mapping */
933 mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
934 if (of_device_is_available(mdio)) {
935 err = qca8k_mdio_register(priv, mdio);
942 /* If a mapping can't be found the legacy mapping is used,
943 * using the qca8k_port_to_phy function
945 priv->legacy_phy_port_mapping = true;
946 priv->ops.phy_read = qca8k_phy_read;
947 priv->ops.phy_write = qca8k_phy_write;
953 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
958 /* SoC specific settings for ipq8064.
959 * If more device require this consider adding
960 * a dedicated binding.
962 if (of_machine_is_compatible("qcom,ipq8064"))
963 mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
965 /* SoC specific settings for ipq8065 */
966 if (of_machine_is_compatible("qcom,ipq8065"))
967 mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
970 ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
971 QCA8K_MAC_PWR_RGMII0_1_8V |
972 QCA8K_MAC_PWR_RGMII1_1_8V,
979 static int qca8k_find_cpu_port(struct dsa_switch *ds)
981 struct qca8k_priv *priv = ds->priv;
983 /* Find the connected cpu port. Valid port are 0 or 6 */
984 if (dsa_is_cpu_port(ds, 0))
987 dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
989 if (dsa_is_cpu_port(ds, 6))
996 qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
998 struct device_node *node = priv->dev->of_node;
999 const struct qca8k_match_data *data;
1003 /* QCA8327 require to set to the correct mode.
1004 * His bigger brother QCA8328 have the 172 pin layout.
1005 * Should be applied by default but we set this just to make sure.
1007 if (priv->switch_id == QCA8K_ID_QCA8327) {
1008 data = of_device_get_match_data(priv->dev);
1010 /* Set the correct package of 148 pin for QCA8327 */
1011 if (data->reduced_package)
1012 val |= QCA8327_PWS_PACKAGE148_EN;
1014 ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1020 if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1021 val |= QCA8K_PWS_POWER_ON_SEL;
1023 if (of_property_read_bool(node, "qca,led-open-drain")) {
1024 if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1025 dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
1029 val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1032 return qca8k_rmw(priv, QCA8K_REG_PWS,
1033 QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1038 qca8k_parse_port_config(struct qca8k_priv *priv)
1040 int port, cpu_port_index = -1, ret;
1041 struct device_node *port_dn;
1042 phy_interface_t mode;
1043 struct dsa_port *dp;
1046 /* We have 2 CPU port. Check them */
1047 for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1048 /* Skip every other port */
1049 if (port != 0 && port != 6)
1052 dp = dsa_to_port(priv->ds, port);
1056 if (!of_device_is_available(port_dn))
1059 ret = of_get_phy_mode(port_dn, &mode);
1064 case PHY_INTERFACE_MODE_RGMII:
1065 case PHY_INTERFACE_MODE_RGMII_ID:
1066 case PHY_INTERFACE_MODE_RGMII_TXID:
1067 case PHY_INTERFACE_MODE_RGMII_RXID:
1068 case PHY_INTERFACE_MODE_SGMII:
1071 if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1072 /* Switch regs accept value in ns, convert ps to ns */
1073 delay = delay / 1000;
1074 else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1075 mode == PHY_INTERFACE_MODE_RGMII_TXID)
1078 if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1079 dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1083 priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1087 if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1088 /* Switch regs accept value in ns, convert ps to ns */
1089 delay = delay / 1000;
1090 else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1091 mode == PHY_INTERFACE_MODE_RGMII_RXID)
1094 if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1095 dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1099 priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1101 /* Skip sgmii parsing for rgmii* mode */
1102 if (mode == PHY_INTERFACE_MODE_RGMII ||
1103 mode == PHY_INTERFACE_MODE_RGMII_ID ||
1104 mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1105 mode == PHY_INTERFACE_MODE_RGMII_RXID)
1108 if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1109 priv->ports_config.sgmii_tx_clk_falling_edge = true;
1111 if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1112 priv->ports_config.sgmii_rx_clk_falling_edge = true;
1114 if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1115 priv->ports_config.sgmii_enable_pll = true;
1117 if (priv->switch_id == QCA8K_ID_QCA8327) {
1118 dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1119 priv->ports_config.sgmii_enable_pll = false;
1122 if (priv->switch_revision < 2)
1123 dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1136 qca8k_setup(struct dsa_switch *ds)
1138 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1139 int cpu_port, ret, i;
1142 cpu_port = qca8k_find_cpu_port(ds);
1144 dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
1148 /* Parse CPU port config to be later used in phy_link mac_config */
1149 ret = qca8k_parse_port_config(priv);
1153 ret = qca8k_setup_mdio_bus(priv);
1157 ret = qca8k_setup_of_pws_reg(priv);
1161 ret = qca8k_setup_mac_pwr_sel(priv);
1165 /* Make sure MAC06 is disabled */
1166 ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
1167 QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
1169 dev_err(priv->dev, "failed disabling MAC06 exchange");
1173 /* Enable CPU Port */
1174 ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
1175 QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
1177 dev_err(priv->dev, "failed enabling CPU port");
1181 /* Enable MIB counters */
1182 ret = qca8k_mib_init(priv);
1184 dev_warn(priv->dev, "mib init failed");
1186 /* Initial setup of all ports */
1187 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1188 /* Disable forwarding by default on all ports */
1189 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1190 QCA8K_PORT_LOOKUP_MEMBER, 0);
1194 /* Enable QCA header mode on all cpu ports */
1195 if (dsa_is_cpu_port(ds, i)) {
1196 ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
1197 FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
1198 FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
1200 dev_err(priv->dev, "failed enabling QCA header mode");
1205 /* Disable MAC by default on all user ports */
1206 if (dsa_is_user_port(ds, i))
1207 qca8k_port_set_status(priv, i, 0);
1210 /* Forward all unknown frames to CPU port for Linux processing
1211 * Notice that in multi-cpu config only one port should be set
1212 * for igmp, unknown, multicast and broadcast packet
1214 ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
1215 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
1216 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
1217 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
1218 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
1222 /* Setup connection between CPU port & user ports
1223 * Configure specific switch configuration for ports
1225 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1226 /* CPU port gets connected to all user ports of the switch */
1227 if (dsa_is_cpu_port(ds, i)) {
1228 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1229 QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
1234 /* Individual user ports get connected to CPU port only */
1235 if (dsa_is_user_port(ds, i)) {
1236 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1237 QCA8K_PORT_LOOKUP_MEMBER,
1242 /* Enable ARP Auto-learning by default */
1243 ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
1244 QCA8K_PORT_LOOKUP_LEARN);
1248 /* For port based vlans to work we need to set the
1249 * default egress vid
1251 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
1252 QCA8K_EGREES_VLAN_PORT_MASK(i),
1253 QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
1257 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
1258 QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
1259 QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
1264 /* The port 5 of the qca8337 have some problem in flood condition. The
1265 * original legacy driver had some specific buffer and priority settings
1266 * for the different port suggested by the QCA switch team. Add this
1267 * missing settings to improve switch stability under load condition.
1268 * This problem is limited to qca8337 and other qca8k switch are not affected.
1270 if (priv->switch_id == QCA8K_ID_QCA8337) {
1272 /* The 2 CPU port and port 5 requires some different
1273 * priority than any other ports.
1278 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1279 QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1280 QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
1281 QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
1282 QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
1283 QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
1284 QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
1287 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1288 QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1289 QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
1290 QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
1291 QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
1293 qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
1295 mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
1296 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1297 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1298 QCA8K_PORT_HOL_CTRL1_WRED_EN;
1299 qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
1300 QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
1301 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1302 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1303 QCA8K_PORT_HOL_CTRL1_WRED_EN,
1307 /* Set initial MTU for every port.
1308 * We have only have a general MTU setting. So track
1309 * every port and set the max across all port.
1310 * Set per port MTU to 1500 as the MTU change function
1311 * will add the overhead and if its set to 1518 then it
1312 * will apply the overhead again and we will end up with
1313 * MTU of 1536 instead of 1518
1315 priv->port_mtu[i] = ETH_DATA_LEN;
1318 /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
1319 if (priv->switch_id == QCA8K_ID_QCA8327) {
1320 mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
1321 QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
1322 qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
1323 QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
1324 QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
1328 /* Setup our port MTUs to match power on defaults */
1329 ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
1331 dev_warn(priv->dev, "failed setting MTU settings");
1333 /* Flush the FDB table */
1334 qca8k_fdb_flush(priv);
1336 /* We don't have interrupts for link changes, so we need to poll */
1337 ds->pcs_poll = true;
1339 /* Set min a max ageing value supported */
1340 ds->ageing_time_min = 7000;
1341 ds->ageing_time_max = 458745000;
1343 /* Set max number of LAGs supported */
1344 ds->num_lag_ids = QCA8K_NUM_LAGS;
1350 qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1356 /* Delay can be declared in 3 different way.
1357 * Mode to rgmii and internal-delay standard binding defined
1358 * rgmii-id or rgmii-tx/rx phy mode set.
1359 * The parse logic set a delay different than 0 only when one
1360 * of the 3 different way is used. In all other case delay is
1361 * not enabled. With ID or TX/RXID delay is enabled and set
1362 * to the default and recommended value.
1364 if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1365 delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1367 val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1368 QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1371 if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1372 delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1374 val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1375 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1378 /* Set RGMII delay based on the selected values */
1379 ret = qca8k_rmw(priv, reg,
1380 QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1381 QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1382 QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1383 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1386 dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1387 cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
/* phylink .mac_config hook: program the pad-control / SGMII registers for the
 * two possible CPU ports (0 and 6) according to the requested interface mode.
 * Ports 1-5 are internal PHYs and need no MAC-side configuration.
 * NOTE(review): fragment is incomplete — several case labels, break statements
 * and error paths are missing from this extraction.
 */
1391 qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1392 const struct phylink_link_state *state)
1394 struct qca8k_priv *priv = ds->priv;
1395 int cpu_port_index, ret;
1399 case 0: /* 1st CPU port */
1400 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1401 state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1402 state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1403 state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1404 state->interface != PHY_INTERFACE_MODE_SGMII)
1407 reg = QCA8K_REG_PORT0_PAD_CTRL;
1408 cpu_port_index = QCA8K_CPU_PORT0;
1415 /* Internal PHY, nothing to do */
1417 case 6: /* 2nd CPU port / external PHY */
1418 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1419 state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1420 state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1421 state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1422 state->interface != PHY_INTERFACE_MODE_SGMII &&
1423 state->interface != PHY_INTERFACE_MODE_1000BASEX)
1426 reg = QCA8K_REG_PORT6_PAD_CTRL;
1427 cpu_port_index = QCA8K_CPU_PORT6;
1430 dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
/* In-band AN is only usable on the SGMII-capable second CPU port */
1434 if (port != 6 && phylink_autoneg_inband(mode)) {
1435 dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1440 switch (state->interface) {
1441 case PHY_INTERFACE_MODE_RGMII:
1442 case PHY_INTERFACE_MODE_RGMII_ID:
1443 case PHY_INTERFACE_MODE_RGMII_TXID:
1444 case PHY_INTERFACE_MODE_RGMII_RXID:
1445 qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1447 /* Configure rgmii delay */
1448 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1450 /* QCA8337 requires to set rgmii rx delay for all ports.
1451 * This is enabled through PORT5_PAD_CTRL for all ports,
1452 * rather than individual port registers.
1454 if (priv->switch_id == QCA8K_ID_QCA8337)
1455 qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1456 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1458 case PHY_INTERFACE_MODE_SGMII:
1459 case PHY_INTERFACE_MODE_1000BASEX:
1460 /* Enable SGMII on the port */
1461 qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1463 /* Enable/disable SerDes auto-negotiation as necessary */
1464 ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1467 if (phylink_autoneg_inband(mode))
1468 val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1470 val |= QCA8K_PWS_SERDES_AEN_DIS;
1471 qca8k_write(priv, QCA8K_REG_PWS, val);
1473 /* Configure the SGMII parameters */
1474 ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1478 val |= QCA8K_SGMII_EN_SD;
1480 if (priv->ports_config.sgmii_enable_pll)
1481 val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
/* SGMII mode depends on what the link partner is: PHY role toward the
 * CPU MAC, MAC role toward an SGMII PHY, BASE-X toward fiber/SFP.
 */
1484 if (dsa_is_cpu_port(ds, port)) {
1485 /* CPU port, we're talking to the CPU MAC, be a PHY */
1486 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1487 val |= QCA8K_SGMII_MODE_CTRL_PHY;
1488 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
1489 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1490 val |= QCA8K_SGMII_MODE_CTRL_MAC;
1491 } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
1492 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1493 val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1496 qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1498 /* From original code is reported port instability as SGMII also
1499 * require delay set. Apply advised values here or take them from DT.
1501 if (state->interface == PHY_INTERFACE_MODE_SGMII)
1502 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1504 /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
1505 * falling edge is set writing in the PORT0 PAD reg
1507 if (priv->switch_id == QCA8K_ID_QCA8327 ||
1508 priv->switch_id == QCA8K_ID_QCA8337)
1509 reg = QCA8K_REG_PORT0_PAD_CTRL;
1513 /* SGMII Clock phase configuration */
1514 if (priv->ports_config.sgmii_rx_clk_falling_edge)
1515 val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1517 if (priv->ports_config.sgmii_tx_clk_falling_edge)
1518 val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1521 ret = qca8k_rmw(priv, reg,
1522 QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1523 QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1528 dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
1529 phy_modes(state->interface), port);
/* phylink .phylink_validate hook: restrict the advertised/supported link
 * modes to what each port can do. CPU port 0 accepts RGMII*/SGMII, internal
 * ports only GMII/INTERNAL, port 6 additionally 1000BASE-X. All ports are
 * limited to 10/100/1000 with pause.
 * NOTE(review): the "unsupported" early-exit paths (zeroing the mask) are
 * missing from this fragment.
 */
1535 qca8k_phylink_validate(struct dsa_switch *ds, int port,
1536 unsigned long *supported,
1537 struct phylink_link_state *state)
1539 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1542 case 0: /* 1st CPU port */
1543 if (state->interface != PHY_INTERFACE_MODE_NA &&
1544 state->interface != PHY_INTERFACE_MODE_RGMII &&
1545 state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1546 state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1547 state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1548 state->interface != PHY_INTERFACE_MODE_SGMII)
1557 if (state->interface != PHY_INTERFACE_MODE_NA &&
1558 state->interface != PHY_INTERFACE_MODE_GMII &&
1559 state->interface != PHY_INTERFACE_MODE_INTERNAL)
1562 case 6: /* 2nd CPU port / external PHY */
1563 if (state->interface != PHY_INTERFACE_MODE_NA &&
1564 state->interface != PHY_INTERFACE_MODE_RGMII &&
1565 state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1566 state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1567 state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1568 state->interface != PHY_INTERFACE_MODE_SGMII &&
1569 state->interface != PHY_INTERFACE_MODE_1000BASEX)
1574 linkmode_zero(supported);
1578 phylink_set_port_modes(mask);
1579 phylink_set(mask, Autoneg);
1581 phylink_set(mask, 1000baseT_Full);
1582 phylink_set(mask, 10baseT_Half);
1583 phylink_set(mask, 10baseT_Full);
1584 phylink_set(mask, 100baseT_Half);
1585 phylink_set(mask, 100baseT_Full);
1587 if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
1588 phylink_set(mask, 1000baseX_Full);
1590 phylink_set(mask, Pause);
1591 phylink_set(mask, Asym_Pause);
/* Intersect caller-provided masks with our capability mask */
1593 linkmode_and(supported, supported, mask);
1594 linkmode_and(state->advertising, state->advertising, mask);
/* phylink .mac_link_state hook: decode the per-port status register into
 * link/autoneg/duplex/speed/pause fields of @state.
 * FIX: the qca8k_read() third argument had been corrupted to the mojibake
 * character "®" by HTML-entity mangling of "&reg" — restored.
 * NOTE(review): declarations of reg/ret, break statements and the final
 * return are missing from this fragment.
 */
1598 qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
1599 struct phylink_link_state *state)
1601 struct qca8k_priv *priv = ds->priv;
1605 ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1609 state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1610 state->an_complete = state->link;
1611 state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1612 state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1615 switch (reg & QCA8K_PORT_STATUS_SPEED) {
1616 case QCA8K_PORT_STATUS_SPEED_10:
1617 state->speed = SPEED_10;
1619 case QCA8K_PORT_STATUS_SPEED_100:
1620 state->speed = SPEED_100;
1622 case QCA8K_PORT_STATUS_SPEED_1000:
1623 state->speed = SPEED_1000;
1626 state->speed = SPEED_UNKNOWN;
1630 state->pause = MLO_PAUSE_NONE;
1631 if (reg & QCA8K_PORT_STATUS_RXFLOW)
1632 state->pause |= MLO_PAUSE_RX;
1633 if (reg & QCA8K_PORT_STATUS_TXFLOW)
1634 state->pause |= MLO_PAUSE_TX;
/* phylink .mac_link_down hook: disable the port MAC when the link drops. */
1640 qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1641 phy_interface_t interface)
1643 struct qca8k_priv *priv = ds->priv;
1645 qca8k_port_set_status(priv, port, 0);
/* phylink .mac_link_up hook: program the port status register with the
 * resolved speed/duplex/pause, or leave the hardware in auto mode when
 * in-band negotiation is used. Flow control is forced on for CPU ports.
 * NOTE(review): the speed switch's case labels and default are missing
 * from this fragment.
 */
1649 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1650 phy_interface_t interface, struct phy_device *phydev,
1651 int speed, int duplex, bool tx_pause, bool rx_pause)
1653 struct qca8k_priv *priv = ds->priv;
1656 if (phylink_autoneg_inband(mode)) {
1657 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1661 reg = QCA8K_PORT_STATUS_SPEED_10;
1664 reg = QCA8K_PORT_STATUS_SPEED_100;
1667 reg = QCA8K_PORT_STATUS_SPEED_1000;
/* Unknown speed: fall back to hardware auto */
1670 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1674 if (duplex == DUPLEX_FULL)
1675 reg |= QCA8K_PORT_STATUS_DUPLEX;
1677 if (rx_pause || dsa_is_cpu_port(ds, port))
1678 reg |= QCA8K_PORT_STATUS_RXFLOW;
1680 if (tx_pause || dsa_is_cpu_port(ds, port))
1681 reg |= QCA8K_PORT_STATUS_TXFLOW;
/* Finally enable both MAC directions */
1684 reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1686 qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
/* ethtool get_strings hook: copy the MIB counter names for ETH_SS_STATS.
 * The count comes from per-chip match data since 832x and 833x expose a
 * different number of counters.
 */
1690 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
1692 const struct qca8k_match_data *match_data;
1693 struct qca8k_priv *priv = ds->priv;
1696 if (stringset != ETH_SS_STATS)
1699 match_data = of_device_get_match_data(priv->dev);
1701 for (i = 0; i < match_data->mib_count; i++)
1702 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
/* ethtool get_ethtool_stats hook: read each MIB counter for @port from the
 * per-port MIB window; 64-bit counters (size == 2) need a second read of
 * the high word at offset +4.
 */
1707 qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
1710 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1711 const struct qca8k_match_data *match_data;
1712 const struct qca8k_mib_desc *mib;
1717 match_data = of_device_get_match_data(priv->dev);
1719 for (i = 0; i < match_data->mib_count; i++) {
1720 mib = &ar8327_mib[i];
1721 reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
1723 ret = qca8k_read(priv, reg, &val);
1727 if (mib->size == 2) {
1728 ret = qca8k_read(priv, reg + 4, &hi);
/* Combine low and high 32-bit halves into the 64-bit stat */
1735 data[i] |= (u64)hi << 32;
/* ethtool get_sset_count hook: number of MIB stats for ETH_SS_STATS,
 * taken from the per-chip match data.
 */
1740 qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
1742 const struct qca8k_match_data *match_data;
1743 struct qca8k_priv *priv = ds->priv;
1745 if (sset != ETH_SS_STATS)
1748 match_data = of_device_get_match_data(priv->dev);
1750 return match_data->mib_count;
/* DSA .set_mac_eee hook: toggle the per-port LPI enable bit in the global
 * EEE control register, serialized by reg_mutex.
 * FIX: the qca8k_read() third argument had been corrupted to the mojibake
 * character "®" by HTML-entity mangling of "&reg" — restored.
 * NOTE(review): the set/clear of lpi_en in reg and the error path are
 * missing from this fragment.
 */
1754 qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
1756 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1757 u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
1761 mutex_lock(&priv->reg_mutex);
1762 ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
1766 if (eee->eee_enabled)
1770 ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
1773 mutex_unlock(&priv->reg_mutex);
/* DSA .get_mac_eee hook: no MAC-side EEE state to report. */
1778 qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
1780 /* Nothing to do on the port's MAC */
/* DSA .port_stp_state_set hook: map bridge STP states to the switch's
 * lookup-control state field and write it for @port.
 */
1785 qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1787 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1791 case BR_STATE_DISABLED:
1792 stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
1794 case BR_STATE_BLOCKING:
1795 stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
1797 case BR_STATE_LISTENING:
1798 stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
1800 case BR_STATE_LEARNING:
1801 stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
1803 case BR_STATE_FORWARDING:
1805 stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
1809 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
1810 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
/* DSA .port_bridge_join hook: add @port to the port-VLAN masks of all other
 * members of the same bridge, then set this port's own member mask to the
 * CPU port plus those members.
 */
static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
1814 struct dsa_bridge bridge,
1815 bool *tx_fwd_offload)
1817 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1818 int port_mask, cpu_port;
1821 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1822 port_mask = BIT(cpu_port);
1824 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1825 if (dsa_is_cpu_port(ds, i))
/* Skip ports that are not members of this bridge */
1827 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
1829 /* Add this port to the portvlan mask of the other ports
1832 ret = regmap_set_bits(priv->regmap,
1833 QCA8K_PORT_LOOKUP_CTRL(i),
1838 port_mask |= BIT(i);
1841 /* Add all other ports to this ports portvlan mask */
1842 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
1843 QCA8K_PORT_LOOKUP_MEMBER, port_mask);
/* DSA .port_bridge_leave hook: remove @port from the port-VLAN masks of the
 * remaining bridge members and isolate it so it can only reach the CPU port.
 */
static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
1849 struct dsa_bridge bridge)
1851 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1854 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1856 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1857 if (dsa_is_cpu_port(ds, i))
1859 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
1861 /* Remove this port to the portvlan mask of the other ports
1864 regmap_clear_bits(priv->regmap,
1865 QCA8K_PORT_LOOKUP_CTRL(i),
1869 /* Set the cpu port to be the only one in the portvlan mask of
1872 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
1873 QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
/* DSA .port_fast_age hook: flush all FDB entries learned on @port,
 * serialized by reg_mutex.
 */
1877 qca8k_port_fast_age(struct dsa_switch *ds, int port)
1879 struct qca8k_priv *priv = ds->priv;
1881 mutex_lock(&priv->reg_mutex);
1882 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
1883 mutex_unlock(&priv->reg_mutex);
/* DSA .set_ageing_time hook: convert @msecs to the 7-second hardware step
 * and program the ATU age-time field.
 * NOTE(review): the rounding of secs into val is missing from this fragment.
 */
1887 qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
1889 struct qca8k_priv *priv = ds->priv;
1890 unsigned int secs = msecs / 1000;
1893 /* AGE_TIME reg is set in 7s step */
1896 /* Handle case with 0 as val to NOT disable
1902 return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
1903 QCA8K_ATU_AGE_TIME(val));
/* DSA .port_enable hook: bring the port MAC up, record the enabled state
 * (used by suspend/resume), and advertise asym pause on user-port PHYs.
 */
1907 qca8k_port_enable(struct dsa_switch *ds, int port,
1908 struct phy_device *phy)
1910 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1912 qca8k_port_set_status(priv, port, 1);
1913 priv->port_sts[port].enabled = 1;
1915 if (dsa_is_user_port(ds, port))
1916 phy_support_asym_pause(phy);
/* DSA .port_disable hook: shut the port MAC down and clear the enabled flag. */
1922 qca8k_port_disable(struct dsa_switch *ds, int port)
1924 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1926 qca8k_port_set_status(priv, port, 0);
1927 priv->port_sts[port].enabled = 0;
/* DSA .port_change_mtu hook: the switch has one global frame-size register,
 * so record the per-port MTU and program the maximum across all ports.
 */
1931 qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1933 struct qca8k_priv *priv = ds->priv;
1936 priv->port_mtu[port] = new_mtu;
1938 for (i = 0; i < QCA8K_NUM_PORTS; i++)
1939 if (priv->port_mtu[i] > mtu)
1940 mtu = priv->port_mtu[i];
1942 /* Include L2 header / FCS length */
1943 return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
/* DSA .port_max_mtu hook: hardware limit is the same for every port. */
1947 qca8k_port_max_mtu(struct dsa_switch *ds, int port)
1949 return QCA8K_MAX_MTU;
/* Helper: insert a static FDB entry for @addr on @port_mask, defaulting
 * @vid to the port-default VLAN when the caller passed 0.
 */
1953 qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
1954 u16 port_mask, u16 vid)
1956 /* Set the vid to the port vlan id if no vid is set */
1958 vid = QCA8K_PORT_VID_DEF;
1960 return qca8k_fdb_add(priv, addr, port_mask, vid,
1961 QCA8K_ATU_STATUS_STATIC);
/* DSA .port_fdb_add hook: thin wrapper inserting a static entry for one port. */
1965 qca8k_port_fdb_add(struct dsa_switch *ds, int port,
1966 const unsigned char *addr, u16 vid)
1968 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1969 u16 port_mask = BIT(port);
1971 return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
/* DSA .port_fdb_del hook: delete the FDB entry, defaulting a zero @vid to
 * the port-default VLAN like the add path.
 */
1975 qca8k_port_fdb_del(struct dsa_switch *ds, int port,
1976 const unsigned char *addr, u16 vid)
1978 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1979 u16 port_mask = BIT(port);
1982 vid = QCA8K_PORT_VID_DEF;
1984 return qca8k_fdb_del(priv, addr, port_mask, vid);
/* DSA .port_fdb_dump hook: walk hardware FDB records for @port (bounded by
 * QCA8K_NUM_FDB_RECORDS) and report each via @cb, under reg_mutex.
 */
1988 qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
1989 dsa_fdb_dump_cb_t *cb, void *data)
1991 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1992 struct qca8k_fdb _fdb = { 0 };
1993 int cnt = QCA8K_NUM_FDB_RECORDS;
1997 mutex_lock(&priv->reg_mutex);
1998 while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2001 is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2002 ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2006 mutex_unlock(&priv->reg_mutex);
/* DSA .port_mdb_add hook: add @port to the port mask of the multicast
 * FDB entry, creating it if absent.
 */
2012 qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2013 const struct switchdev_obj_port_mdb *mdb)
2015 struct qca8k_priv *priv = ds->priv;
2016 const u8 *addr = mdb->addr;
2019 return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
/* DSA .port_mdb_del hook: remove @port from the multicast FDB entry's
 * port mask.
 */
2023 qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2024 const struct switchdev_obj_port_mdb *mdb)
2026 struct qca8k_priv *priv = ds->priv;
2027 const u8 *addr = mdb->addr;
2030 return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
/* DSA .port_mirror_add hook: configure @port as a mirror source. The chip
 * supports a single monitor port (field value 0xF means "none"), so reject
 * requests that target a different monitor port than the one already set.
 */
2034 qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2035 struct dsa_mall_mirror_tc_entry *mirror,
2038 struct qca8k_priv *priv = ds->priv;
2039 int monitor_port, ret;
2042 /* Check for existent entry */
2043 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2046 ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
2050 /* QCA83xx can have only one port set to mirror mode.
2051 * Check that the correct port is requested and return error otherwise.
2052 * When no mirror port is set, the values is set to 0xF
2054 monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2055 if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2058 /* Set the monitor port */
2059 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2060 mirror->to_local_port);
2061 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2062 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
/* Ingress mirroring lives in the lookup-control register, egress in
 * the HOL-control register.
 */
2067 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2068 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2070 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2071 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2074 ret = regmap_update_bits(priv->regmap, reg, val, val);
2078 /* Track mirror port for tx and rx to decide when the
2079 * mirror port has to be disabled.
2082 priv->mirror_rx |= BIT(port);
2084 priv->mirror_tx |= BIT(port);
/* DSA .port_mirror_del hook: stop mirroring from @port; when no source
 * ports remain, disable the monitor port by writing the 0xF sentinel.
 */
2090 qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2091 struct dsa_mall_mirror_tc_entry *mirror)
2093 struct qca8k_priv *priv = ds->priv;
2097 if (mirror->ingress) {
2098 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2099 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2101 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2102 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2105 ret = regmap_clear_bits(priv->regmap, reg, val);
2109 if (mirror->ingress)
2110 priv->mirror_rx &= ~BIT(port);
2112 priv->mirror_tx &= ~BIT(port);
2114 /* No port set to send packet to mirror port. Disable mirror port */
2115 if (!priv->mirror_rx && !priv->mirror_tx) {
2116 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2117 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2118 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2123 dev_err(priv->dev, "Failed to del mirror port from %d", port);
/* DSA .port_vlan_filtering hook: switch the port's lookup VLAN mode between
 * SECURE (filtering on) and NONE (filtering off).
 */
2127 qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2128 struct netlink_ext_ack *extack)
2130 struct qca8k_priv *priv = ds->priv;
2133 if (vlan_filtering) {
2134 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2135 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2136 QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2138 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2139 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2140 QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
/* DSA .port_vlan_add hook: add @port to the VLAN table entry and, when the
 * VLAN is the PVID, program the egress default VID and the port's CVID/SVID.
 */
2147 qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2148 const struct switchdev_obj_port_vlan *vlan,
2149 struct netlink_ext_ack *extack)
2151 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2152 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2153 struct qca8k_priv *priv = ds->priv;
2156 ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2158 dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
/* PVID handling: set the default egress VID for this port... */
2163 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2164 QCA8K_EGREES_VLAN_PORT_MASK(port),
2165 QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
/* ...and the customer/service VIDs used on ingress */
2169 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2170 QCA8K_PORT_VLAN_CVID(vlan->vid) |
2171 QCA8K_PORT_VLAN_SVID(vlan->vid));
/* DSA .port_vlan_del hook: remove @port from the VLAN table entry. */
2178 qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2179 const struct switchdev_obj_port_vlan *vlan)
2181 struct qca8k_priv *priv = ds->priv;
2184 ret = qca8k_vlan_del(priv, port, vlan->vid);
2186 dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
/* DSA .get_phy_flags hook: pass the switch revision to the internal PHY
 * driver via the phy-flags mechanism (internal PHY ports are 1..5).
 */
static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2193 struct qca8k_priv *priv = ds->priv;
2195 /* Communicate to the phy internal driver the switch revision.
2196 * Based on the switch revision different values needs to be
2197 * set to the dbg and mmd reg on the phy.
2198 * The first 2 bit are used to communicate the switch revision
2199 * to the phy driver.
2201 if (port > 0 && port < 6)
2202 return priv->switch_revision;
/* DSA .get_tag_protocol hook: this family always uses the QCA tag format. */
static enum dsa_tag_protocol
2208 qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2209 enum dsa_tag_protocol mp)
2211 return DSA_TAG_PROTO_QCA;
/* Check whether a LAG can be offloaded to hardware: a valid LAG id,
 * at most QCA8K_NUM_PORTS_FOR_LAG members, hash TX policy, and an
 * L2 or L2+L3 hash type.
 */
2215 qca8k_lag_can_offload(struct dsa_switch *ds,
2216 struct net_device *lag,
2217 struct netdev_lag_upper_info *info)
2219 struct dsa_port *dp;
2220 int id, members = 0;
2222 id = dsa_lag_id(ds->dst, lag);
2223 if (id < 0 || id >= ds->num_lag_ids)
2226 dsa_lag_foreach_port(dp, ds->dst, lag)
2227 /* Includes the port joining the LAG */
2230 if (members > QCA8K_NUM_PORTS_FOR_LAG)
2233 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2236 if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2237 info->hash_type != NETDEV_LAG_HASH_L23)
/* Program the global trunk hash mode for a LAG. The hash mode register is
 * shared by all (up to 4) trunks, so a new LAG may only set a different
 * hash mode when it is the only configured LAG; otherwise mismatches are
 * rejected.
 * NOTE(review): the L23 case appears to fall through into L2 (SIP/DIP plus
 * SA/DA bits) but the break statements are missing from this fragment —
 * confirm against the upstream source.
 */
2244 qca8k_lag_setup_hash(struct dsa_switch *ds,
2245 struct net_device *lag,
2246 struct netdev_lag_upper_info *info)
2248 struct qca8k_priv *priv = ds->priv;
2249 bool unique_lag = true;
2253 id = dsa_lag_id(ds->dst, lag);
2255 switch (info->hash_type) {
2256 case NETDEV_LAG_HASH_L23:
2257 hash |= QCA8K_TRUNK_HASH_SIP_EN;
2258 hash |= QCA8K_TRUNK_HASH_DIP_EN;
2260 case NETDEV_LAG_HASH_L2:
2261 hash |= QCA8K_TRUNK_HASH_SA_EN;
2262 hash |= QCA8K_TRUNK_HASH_DA_EN;
2264 default: /* We should NEVER reach this */
2268 /* Check if we are the unique configured LAG */
2269 dsa_lags_foreach_id(i, ds->dst)
2270 if (i != id && dsa_lag_dev(ds->dst, i)) {
2275 /* Hash Mode is global. Make sure the same Hash Mode
2276 * is set to all the 4 possible lag.
2277 * If we are the unique LAG we can set whatever hash
2279 * To change hash mode it's needed to remove all LAG
2280 * and change the mode with the latest.
2283 priv->lag_hash_mode = hash;
2284 } else if (priv->lag_hash_mode != hash) {
2285 netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
2289 return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2290 QCA8K_TRUNK_HASH_MASK, hash);
/* Add or remove @port from the trunk member registers for the LAG @id.
 * First updates the trunk member/enable bits in CTRL0 (disabling the trunk
 * when the member map becomes empty), then scans the four member slots for
 * a free one (when adding) or for this port's slot (when deleting) and
 * writes the slot's enable + port-number fields.
 */
2294 qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2295 struct net_device *lag, bool delete)
2297 struct qca8k_priv *priv = ds->priv;
2301 id = dsa_lag_id(ds->dst, lag);
2303 /* Read current port member */
2304 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2308 /* Shift val to the correct trunk */
2309 val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2310 val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2316 /* Update port member. With empty portmap disable trunk */
2317 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2318 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2319 QCA8K_REG_GOL_TRUNK_EN(id),
2320 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2321 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2323 /* Search empty member if adding or port on deleting */
2324 for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2325 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2329 val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2330 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2333 /* If port flagged to be disabled assume this member is
2336 if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2339 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2343 /* If port flagged to be enabled assume this member is
2346 if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2350 /* We have found the member to add/remove */
2354 /* Set port in the correct port mask or disable port if in delete mode */
2355 return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
2356 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
2357 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
2358 !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
2359 port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
/* DSA .port_lag_join hook: validate offloadability, program the global
 * trunk hash, then add the port to the trunk member map.
 */
2363 qca8k_port_lag_join(struct dsa_switch *ds, int port,
2364 struct net_device *lag,
2365 struct netdev_lag_upper_info *info)
2369 if (!qca8k_lag_can_offload(ds, lag, info))
2372 ret = qca8k_lag_setup_hash(ds, lag, info);
2376 return qca8k_lag_refresh_portmap(ds, port, lag, false);
/* DSA .port_lag_leave hook: remove the port from the trunk member map. */
2380 qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2381 struct net_device *lag)
2383 return qca8k_lag_refresh_portmap(ds, port, lag, true);
/* DSA operations table binding the qca8k implementations above into the
 * DSA core. Copied into priv->ops at probe time so per-instance tweaks
 * remain possible.
 */
static const struct dsa_switch_ops qca8k_switch_ops = {
2387 .get_tag_protocol = qca8k_get_tag_protocol,
2388 .setup = qca8k_setup,
2389 .get_strings = qca8k_get_strings,
2390 .get_ethtool_stats = qca8k_get_ethtool_stats,
2391 .get_sset_count = qca8k_get_sset_count,
2392 .set_ageing_time = qca8k_set_ageing_time,
2393 .get_mac_eee = qca8k_get_mac_eee,
2394 .set_mac_eee = qca8k_set_mac_eee,
2395 .port_enable = qca8k_port_enable,
2396 .port_disable = qca8k_port_disable,
2397 .port_change_mtu = qca8k_port_change_mtu,
2398 .port_max_mtu = qca8k_port_max_mtu,
2399 .port_stp_state_set = qca8k_port_stp_state_set,
2400 .port_bridge_join = qca8k_port_bridge_join,
2401 .port_bridge_leave = qca8k_port_bridge_leave,
2402 .port_fast_age = qca8k_port_fast_age,
2403 .port_fdb_add = qca8k_port_fdb_add,
2404 .port_fdb_del = qca8k_port_fdb_del,
2405 .port_fdb_dump = qca8k_port_fdb_dump,
2406 .port_mdb_add = qca8k_port_mdb_add,
2407 .port_mdb_del = qca8k_port_mdb_del,
2408 .port_mirror_add = qca8k_port_mirror_add,
2409 .port_mirror_del = qca8k_port_mirror_del,
2410 .port_vlan_filtering = qca8k_port_vlan_filtering,
2411 .port_vlan_add = qca8k_port_vlan_add,
2412 .port_vlan_del = qca8k_port_vlan_del,
2413 .phylink_validate = qca8k_phylink_validate,
2414 .phylink_mac_link_state = qca8k_phylink_mac_link_state,
2415 .phylink_mac_config = qca8k_phylink_mac_config,
2416 .phylink_mac_link_down = qca8k_phylink_mac_link_down,
2417 .phylink_mac_link_up = qca8k_phylink_mac_link_up,
2418 .get_phy_flags = qca8k_get_phy_flags,
2419 .port_lag_join = qca8k_port_lag_join,
2420 .port_lag_leave = qca8k_port_lag_leave,
/* Read MASK_CTRL and verify that the device id matches the id expected from
 * the DT compatible's match data; store id and revision in priv (revision is
 * later forwarded to the internal PHY driver via get_phy_flags).
 */
static int qca8k_read_switch_id(struct qca8k_priv *priv)
2425 const struct qca8k_match_data *data;
2430 /* get the switches ID from the compatible */
2431 data = of_device_get_match_data(priv->dev);
2435 ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
2439 id = QCA8K_MASK_CTRL_DEVICE_ID(val);
2440 if (id != data->id) {
2441 dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
2445 priv->switch_id = id;
2447 /* Save revision to communicate to the internal PHY driver */
2448 priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
/* MDIO probe: allocate priv, pulse the optional reset GPIO, set up the
 * regmap, verify the switch id, then allocate and register the dsa_switch.
 * All allocations are devm-managed.
 */
2454 qca8k_sw_probe(struct mdio_device *mdiodev)
2456 struct qca8k_priv *priv;
2459 /* allocate the private data struct so that we can probe the switches
2462 priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
2466 priv->bus = mdiodev->bus;
2467 priv->dev = &mdiodev->dev;
2469 priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
2471 if (IS_ERR(priv->reset_gpio))
2472 return PTR_ERR(priv->reset_gpio);
2474 if (priv->reset_gpio) {
2475 gpiod_set_value_cansleep(priv->reset_gpio, 1);
2476 /* The active low duration must be greater than 10 ms
2477 * and checkpatch.pl wants 20 ms.
2480 gpiod_set_value_cansleep(priv->reset_gpio, 0);
2483 /* Start by setting up the register mapping */
2484 priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
2485 &qca8k_regmap_config);
2486 if (IS_ERR(priv->regmap)) {
2487 dev_err(priv->dev, "regmap initialization failed");
2488 return PTR_ERR(priv->regmap);
2491 /* Check the detected switch id */
2492 ret = qca8k_read_switch_id(priv);
2496 priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
2500 priv->ds->dev = &mdiodev->dev;
2501 priv->ds->num_ports = QCA8K_NUM_PORTS;
2502 priv->ds->priv = priv;
/* Use a per-instance copy of the ops table */
2503 priv->ops = qca8k_switch_ops;
2504 priv->ds->ops = &priv->ops;
2505 mutex_init(&priv->reg_mutex);
2506 dev_set_drvdata(&mdiodev->dev, priv);
2508 return dsa_register_switch(priv->ds);
/* MDIO remove: disable every port MAC, unregister the switch and clear
 * drvdata.
 */
2512 qca8k_sw_remove(struct mdio_device *mdiodev)
2514 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
2520 for (i = 0; i < QCA8K_NUM_PORTS; i++)
2521 qca8k_port_set_status(priv, i, 0);
2523 dsa_unregister_switch(priv->ds);
2525 dev_set_drvdata(&mdiodev->dev, NULL);
/* MDIO shutdown: quiesce the switch without full teardown and clear
 * drvdata so remove() becomes a no-op.
 */
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
2530 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
2535 dsa_switch_shutdown(priv->ds);
2537 dev_set_drvdata(&mdiodev->dev, NULL);
2540 #ifdef CONFIG_PM_SLEEP
/* Suspend/resume helper: toggle the MAC of every port that was enabled
 * before suspend (enabled flag recorded in port_enable/port_disable).
 */
2542 qca8k_set_pm(struct qca8k_priv *priv, int enable)
2546 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2547 if (!priv->port_sts[i].enabled)
2550 qca8k_port_set_status(priv, i, enable);
/* PM suspend: take the enabled ports down, then suspend the DSA core. */
static int qca8k_suspend(struct device *dev)
2556 struct qca8k_priv *priv = dev_get_drvdata(dev);
2558 qca8k_set_pm(priv, 0);
2560 return dsa_switch_suspend(priv->ds);
/* PM resume: bring the previously-enabled ports back up, then resume
 * the DSA core.
 */
static int qca8k_resume(struct device *dev)
2565 struct qca8k_priv *priv = dev_get_drvdata(dev);
2567 qca8k_set_pm(priv, 1);
2569 return dsa_switch_resume(priv->ds);
2571 #endif /* CONFIG_PM_SLEEP */
/* System sleep PM ops; compiled-out callbacks when !CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
2574 qca8k_suspend, qca8k_resume);
/* Per-chip match data: expected device id, package variant, and number of
 * exported MIB counters. qca8327 and qca8328 share a device id but differ
 * in package (8327 is the reduced package).
 */
static const struct qca8k_match_data qca8327 = {
2577 .id = QCA8K_ID_QCA8327,
2578 .reduced_package = true,
2579 .mib_count = QCA8K_QCA832X_MIB_COUNT,
2582 static const struct qca8k_match_data qca8328 = {
2583 .id = QCA8K_ID_QCA8327,
2584 .mib_count = QCA8K_QCA832X_MIB_COUNT,
2587 static const struct qca8k_match_data qca833x = {
2588 .id = QCA8K_ID_QCA8337,
2589 .mib_count = QCA8K_QCA833X_MIB_COUNT,
/* Device-tree compatibles handled by this driver */
2592 static const struct of_device_id qca8k_of_match[] = {
2593 { .compatible = "qca,qca8327", .data = &qca8327 },
2594 { .compatible = "qca,qca8328", .data = &qca8328 },
2595 { .compatible = "qca,qca8334", .data = &qca833x },
2596 { .compatible = "qca,qca8337", .data = &qca833x },
/* MDIO driver registration and module metadata. */
static struct mdio_driver qca8kmdio_driver = {
2601 .probe = qca8k_sw_probe,
2602 .remove = qca8k_sw_remove,
2603 .shutdown = qca8k_sw_shutdown,
2606 .of_match_table = qca8k_of_match,
2607 .pm = &qca8k_pm_ops,
/* Registers init/exit boilerplate for an mdio_driver */
2611 mdio_module_driver(qca8kmdio_driver);
2613 MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
2614 MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
2615 MODULE_LICENSE("GPL v2");
2616 MODULE_ALIAS("platform:qca8k");