1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
7 #include <net/switchdev.h>
8 #include <linux/if_bridge.h>
9 #include <linux/iopoll.h>
11 #include "sparx5_main_regs.h"
12 #include "sparx5_main.h"
14 /* Commands for Mac Table Command register */
15 #define MAC_CMD_LEARN 0 /* Insert (Learn) 1 entry */
16 #define MAC_CMD_UNLEARN 1 /* Unlearn (Forget) 1 entry */
17 #define MAC_CMD_LOOKUP 2 /* Look up 1 entry */
18 #define MAC_CMD_READ 3 /* Read entry at Mac Table Index */
19 #define MAC_CMD_WRITE 4 /* Write entry at Mac Table Index */
20 #define MAC_CMD_SCAN 5 /* Scan (Age or find next) */
21 #define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
22 #define MAC_CMD_CLEAR_ALL 7 /* Delete all entries in table */
24 /* Commands for MAC_ENTRY_ADDR_TYPE */
/* How the MAC_ENTRY_ADDR field of LRN_MAC_ACCESS_CFG_2 is interpreted:
 * a front port, the CPU/internal port, a global aggregation group or a
 * multicast index (used below for PGID-based multicast destinations).
 */
25 #define MAC_ENTRY_ADDR_TYPE_UPSID_PN 0
26 #define MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
27 #define MAC_ENTRY_ADDR_TYPE_GLAG 2
28 #define MAC_ENTRY_ADDR_TYPE_MC_IDX 3
/* Polling interval and timeout used when waiting for a MAC table
 * command (ACCESS_SHOT) to complete.
 */
30 #define TABLE_UPDATE_SLEEP_US 10
31 #define TABLE_UPDATE_TIMEOUT_US 100000
/* Software shadow of one learned MAC table entry, kept on
 * sparx5->mact_entries so the pull worker can detect added/removed/moved
 * entries and notify the bridge.
 * NOTE(review): listing is truncated here - the vid/port/flags members
 * used elsewhere in this file are not visible in this view.
 */
33 struct sparx5_mact_entry {
34 struct list_head list;
35 unsigned char mac[ETH_ALEN];
/* Flag bits for the entry's flags member:
 * MAC_ENT_ALIVE - entry was seen in HW during the last scan
 * MAC_ENT_MOVED - entry changed port since the last scan
 * MAC_ENT_LOCK  - permanent entry, never aged out by the pull worker
 */
37 #define MAC_ENT_ALIVE BIT(0)
38 #define MAC_ENT_MOVED BIT(1)
39 #define MAC_ENT_LOCK BIT(2)
/* Read the raw MAC table access control register; used as the polled
 * value by sparx5_mact_wait_for_completion() below.
 */
44 static int sparx5_mact_get_status(struct sparx5 *sparx5)
46 return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
/* Busy-wait (sleeping poll) until the hardware clears the ACCESS_SHOT
 * bit, i.e. the previously issued MAC table command has finished.
 * Returns 0 on completion or -ETIMEDOUT after TABLE_UPDATE_TIMEOUT_US.
 */
49 static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
53 return readx_poll_timeout(sparx5_mact_get_status,
55 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
56 TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
/* Load the MAC address and VLAN id into the two MAC table access
 * registers, selecting the entry that a subsequent command (learn,
 * lookup, unlearn, find-smallest) will operate on.
 * Caller must hold sparx5->lock.
 * NOTE(review): the lines packing mac/vid into mach/macl are not visible
 * in this view - presumably vid and mac[0..1] go into mach and mac[2..5]
 * into macl, mirroring sparx5_mact_get(); confirm against full source.
 */
59 static void sparx5_mact_select(struct sparx5 *sparx5,
60 const unsigned char mac[ETH_ALEN],
63 u32 macl = 0, mach = 0;
65 /* Set the MAC address to handle and the vlan associated in a format
66 * understood by the hardware.
76 spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
77 spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
/* Install a locked (static, non-aging) MAC table entry in hardware.
 * @pgid: destination; a front port when pgid < SPX5_PORTS, otherwise a
 *        multicast PGID whose index is pgid - SPX5_PORTS.
 * Returns the result of waiting for command completion (0 or -ETIMEDOUT).
 */
80 int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
81 const unsigned char mac[ETH_ALEN], u16 vid)
85 if (pgid < SPX5_PORTS) {
86 type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
88 addr += (pgid / 32) << 5; /* Add upsid */
90 type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
91 addr = pgid - SPX5_PORTS;
94 mutex_lock(&sparx5->lock);
96 sparx5_mact_select(sparx5, mac, vid);
98 /* MAC entry properties */
/* Mark the entry valid and LOCKED so hardware will not age it out */
99 spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
100 LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
101 LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
102 LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
103 sparx5, LRN_MAC_ACCESS_CFG_2);
104 spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);
106 /* Insert/learn new entry */
107 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
108 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
109 sparx5, LRN_COMMON_ACCESS_CTRL);
111 ret = sparx5_mact_wait_for_completion(sparx5);
113 mutex_unlock(&sparx5->lock);
/* dev_mc_unsync() callback: remove a multicast address (on the port's
 * PVID) from the hardware MAC table when it is no longer subscribed.
 */
118 int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
120 struct sparx5_port *port = netdev_priv(dev);
121 struct sparx5 *sparx5 = port->sparx5;
123 return sparx5_mact_forget(sparx5, addr, port->pvid);
/* dev_mc_sync() callback: direct a subscribed multicast address (on the
 * port's PVID) to the CPU port group so the frames reach the host.
 */
126 int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
128 struct sparx5_port *port = netdev_priv(dev);
129 struct sparx5 *sparx5 = port->sparx5;
131 return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
/* Read back the MAC table entry currently addressed by the access
 * registers (after e.g. a FIND_SMALLEST command) into @mac, @vid and
 * @pcfg2. Caller must hold sparx5->lock.
 * NOTE(review): the valid-entry path below decodes the MAC from
 * CFG_0/CFG_1; the lines extracting *vid and filling *pcfg2, and the
 * return value convention, are not visible in this truncated view.
 */
134 static int sparx5_mact_get(struct sparx5 *sparx5,
135 unsigned char mac[ETH_ALEN],
136 u16 *vid, u32 *pcfg2)
138 u32 mach, macl, cfg2;
141 cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
142 if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
143 mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
144 macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
/* MAC bytes are packed big-endian style: top two octets in the low
 * 16 bits of CFG_0, remaining four octets in CFG_1.
 */
145 mac[0] = ((mach >> 8) & 0xff);
146 mac[1] = ((mach >> 0) & 0xff);
147 mac[2] = ((macl >> 24) & 0xff);
148 mac[3] = ((macl >> 16) & 0xff);
149 mac[4] = ((macl >> 8) & 0xff);
150 mac[5] = ((macl >> 0) & 0xff);
/* Advance to the next MAC table entry after (@mac, @vid) using the
 * hardware FIND_SMALLEST scan, including locked entries, and return it
 * through @mac/@vid/@pcfg2.
 * NOTE(review): the bool result computation and the handling between the
 * two ret assignments are not visible in this truncated view.
 */
159 bool sparx5_mact_getnext(struct sparx5 *sparx5,
160 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
165 mutex_lock(&sparx5->lock);
167 sparx5_mact_select(sparx5, mac, *vid);
/* Scan forward from the selected address until an entry is found,
 * not skipping locked (static) entries.
 */
169 spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
170 LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
171 sparx5, LRN_SCAN_NEXT_CFG);
172 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
173 (MAC_CMD_FIND_SMALLEST) |
174 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
175 sparx5, LRN_COMMON_ACCESS_CTRL);
177 ret = sparx5_mact_wait_for_completion(sparx5);
179 ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
184 mutex_unlock(&sparx5->lock);
/* Check whether (@mac, vid) exists in the hardware MAC table.
 * Returns the entry's valid bit (non-zero if present); the error path
 * after the completion wait is not visible in this truncated view.
 */
189 static int sparx5_mact_lookup(struct sparx5 *sparx5,
190 const unsigned char mac[ETH_ALEN],
195 mutex_lock(&sparx5->lock);
197 sparx5_mact_select(sparx5, mac, vid);
199 /* Issue a lookup command */
200 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
201 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
202 sparx5, LRN_COMMON_ACCESS_CTRL);
204 ret = sparx5_mact_wait_for_completion(sparx5);
/* The lookup result is reported via the VLD bit of CFG_2 */
208 ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
209 (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
212 mutex_unlock(&sparx5->lock);
/* Remove (@mac, @vid) from the hardware MAC table via the UNLEARN
 * command. Returns the result of waiting for command completion.
 */
217 int sparx5_mact_forget(struct sparx5 *sparx5,
218 const unsigned char mac[ETH_ALEN], u16 vid)
222 mutex_lock(&sparx5->lock);
224 sparx5_mact_select(sparx5, mac, vid);
226 /* Issue an unlearn command */
227 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
228 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
229 sparx5, LRN_COMMON_ACCESS_CTRL);
231 ret = sparx5_mact_wait_for_completion(sparx5);
233 mutex_unlock(&sparx5->lock);
/* Allocate and initialize a software MAC table entry shadow.
 * GFP_ATOMIC because callers may run from atomic/notifier context.
 * NOTE(review): the NULL check after devm_kzalloc() and the return
 * statement are not visible in this truncated view - presumably the
 * function returns NULL on allocation failure.
 */
238 static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
239 const unsigned char *mac,
240 u16 vid, u16 port_index)
242 struct sparx5_mact_entry *mact_entry;
244 mact_entry = devm_kzalloc(sparx5->dev,
245 sizeof(*mact_entry), GFP_ATOMIC);
249 memcpy(mact_entry->mac, mac, ETH_ALEN);
250 mact_entry->vid = vid;
251 mact_entry->port = port_index;
/* Find the software entry matching (@mac, @vid, @port_index), or NULL.
 * Takes mact_lock for the list walk; the loop's match/break body and the
 * final return of res are not visible in this truncated view.
 */
255 static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
256 const unsigned char *mac,
257 u16 vid, u16 port_index)
259 struct sparx5_mact_entry *mact_entry;
260 struct sparx5_mact_entry *res = NULL;
262 mutex_lock(&sparx5->mact_lock);
263 list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
264 if (mact_entry->vid == vid &&
265 ether_addr_equal(mac, mact_entry->mac) &&
266 mact_entry->port == port_index) {
271 mutex_unlock(&sparx5->mact_lock);
/* Emit a switchdev FDB notification (add/del to bridge) for @mac/@vid
 * on @dev, with the offloaded flag set as requested.
 * NOTE(review): the lines filling info.addr and info.vid are not visible
 * in this truncated view - confirm against the full source.
 */
276 static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
277 const char *mac, u16 vid,
278 struct net_device *dev, bool offloaded)
280 struct switchdev_notifier_fdb_info info;
284 info.offloaded = offloaded;
285 call_switchdev_notifiers(type, dev, &info.info, NULL);
/* Add a static (locked) MAC entry for @addr/@vid on @port: update the
 * hardware table, track it in the software list so the pull worker does
 * not re-notify it, and tell the bridge about the new FDB entry.
 * NOTE(review): several lines (early-exit on existing entry, allocation
 * failure handling, the notifier call arguments and the return) are not
 * visible in this truncated view.
 */
288 int sparx5_add_mact_entry(struct sparx5 *sparx5,
289 struct sparx5_port *port,
290 const unsigned char *addr, u16 vid)
292 struct sparx5_mact_entry *mact_entry;
295 ret = sparx5_mact_lookup(sparx5, addr, vid);
299 /* In case the entry already exists, don't add it again to SW,
300 * just update HW, but we need to look in the actual HW because
301 * it is possible for an entry to be learn by HW and before the
302 * mact thread to start the frame will reach CPU and the CPU will
303 * add the entry but without the extern_learn flag.
305 mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
309 /* Add the entry in SW MAC table not to get the notification when
310 * SW is pulling again
312 mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
316 mutex_lock(&sparx5->mact_lock);
317 list_add_tail(&mact_entry->list, &sparx5->mact_entries);
318 mutex_unlock(&sparx5->mact_lock);
/* Program the hardware entry; learned as locked so it will not age */
321 ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
/* First time this entry is seen: lock it and notify the bridge */
324 if (mact_entry->flags == 0) {
325 mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
326 sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
333 int sparx5_del_mact_entry(struct sparx5 *sparx5,
334 const unsigned char *addr,
337 struct sparx5_mact_entry *mact_entry, *tmp;
339 /* Delete the entry in SW MAC table not to get the notification when
340 * SW is pulling again
342 mutex_lock(&sparx5->mact_lock);
343 list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
345 if ((vid == 0 || mact_entry->vid == vid) &&
346 ether_addr_equal(addr, mact_entry->mac)) {
347 list_del(&mact_entry->list);
348 devm_kfree(sparx5->dev, mact_entry);
350 sparx5_mact_forget(sparx5, addr, mact_entry->vid);
353 mutex_unlock(&sparx5->mact_lock);
/* Process one entry returned by the hardware scan in the pull worker:
 * ignore non-port and out-of-range destinations, mark known SW entries
 * alive (flagging and logging port moves), and for new or moved entries
 * add a SW shadow and notify the bridge.
 * NOTE(review): the found flag's declaration/initialization and several
 * early returns are not visible in this truncated view.
 */
358 static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
359 unsigned char mac[ETH_ALEN],
362 struct sparx5_mact_entry *mact_entry;
/* Only entries that point at a front port are tracked here */
366 if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
367 MAC_ENTRY_ADDR_TYPE_UPSID_PN)
370 port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
371 if (port >= SPX5_PORTS)
/* Ports outside the bridge are not reported to the bridge layer */
374 if (!test_bit(port, sparx5->bridge_mask))
377 mutex_lock(&sparx5->mact_lock);
378 list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
379 if (mact_entry->vid == vid &&
380 ether_addr_equal(mac, mact_entry->mac)) {
382 mact_entry->flags |= MAC_ENT_ALIVE;
383 if (mact_entry->port != port) {
384 dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
385 mact_entry->port, port);
386 mact_entry->port = port;
387 mact_entry->flags |= MAC_ENT_MOVED;
393 mutex_unlock(&sparx5->mact_lock);
395 if (found && !(mact_entry->flags & MAC_ENT_MOVED))
396 /* Present, not moved */
400 /* Entry not found - now add */
401 mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
405 mact_entry->flags |= MAC_ENT_ALIVE;
406 mutex_lock(&sparx5->mact_lock);
407 list_add_tail(&mact_entry->list, &sparx5->mact_entries);
408 mutex_unlock(&sparx5->mact_lock);
411 /* New or moved entry - notify bridge */
412 sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
413 mac, vid, sparx5->ports[port]->ndev,
/* Periodic worker that mirrors the hardware MAC table into the SW list:
 * clear the per-entry ALIVE/MOVED flags, walk the whole HW table with
 * FIND_SMALLEST, handle each entry, then age out SW entries that were
 * neither seen in HW (ALIVE) nor permanent (LOCK), notifying the bridge
 * of their removal. Re-queues itself with SPX5_MACT_PULL_DELAY.
 * NOTE(review): the scan loop's termination condition and the vid/cfg2
 * declarations are not visible in this truncated view.
 */
417 void sparx5_mact_pull_work(struct work_struct *work)
419 struct delayed_work *del_work = to_delayed_work(work);
420 struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
422 struct sparx5_mact_entry *mact_entry, *tmp;
423 unsigned char mac[ETH_ALEN];
428 /* Reset MAC entry flags */
/* Keep only the LOCK bit; ALIVE/MOVED are recomputed by this scan */
429 mutex_lock(&sparx5->mact_lock);
430 list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
431 mact_entry->flags &= MAC_ENT_LOCK;
432 mutex_unlock(&sparx5->mact_lock);
434 /* MAIN mac address processing loop */
/* Start the scan from the all-zero MAC address */
436 memset(mac, 0, sizeof(mac));
438 mutex_lock(&sparx5->lock);
439 sparx5_mact_select(sparx5, mac, vid);
440 spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
441 sparx5, LRN_SCAN_NEXT_CFG);
442 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
443 (MAC_CMD_FIND_SMALLEST) |
444 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
445 sparx5, LRN_COMMON_ACCESS_CTRL);
446 ret = sparx5_mact_wait_for_completion(sparx5);
448 ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
449 mutex_unlock(&sparx5->lock);
451 sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
454 mutex_lock(&sparx5->mact_lock);
455 list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
457 /* If the entry is in HW or permanent, then skip */
458 if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
461 sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
462 mact_entry->mac, mact_entry->vid,
463 sparx5->ports[mact_entry->port]->ndev,
466 list_del(&mact_entry->list);
467 devm_kfree(sparx5->dev, mact_entry);
469 mutex_unlock(&sparx5->mact_lock);
/* Schedule the next pull */
471 queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
472 SPX5_MACT_PULL_DELAY);
/* Program the hardware auto-age period from an ageing time in
 * milliseconds. The unit is set to 10 ms and the period is halved
 * because ageing is done with a single activity bit (two scan periods
 * per age-out).
 * NOTE(review): for msecs < 20 the programmed period is value / 2 == 0 -
 * confirm against the datasheet whether 0 disables or means minimum.
 */
475 void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
477 int value = max(1, msecs / 10); /* unit 10 ms */
479 spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
480 LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
481 LRN_AUTOAGE_CFG_UNIT_SIZE |
482 LRN_AUTOAGE_CFG_PERIOD_VAL,
/* One-time MAC table initialization at probe: set up the register access
 * lock, wipe the entire hardware table and apply the bridge's default
 * ageing time (BR_DEFAULT_AGEING_TIME is in jiffies; converted to ms).
 */
487 void sparx5_mact_init(struct sparx5 *sparx5)
489 mutex_init(&sparx5->lock);
491 /* Flush MAC table */
492 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
493 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
494 sparx5, LRN_COMMON_ACCESS_CTRL);
496 if (sparx5_mact_wait_for_completion(sparx5) != 0)
497 dev_warn(sparx5->dev, "MAC flush error\n");
499 sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);