// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

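/* Fold the flow tuple stored in a FoE entry into a table index: hv1/hv2/hv3
 * are built from the ports and addresses, mixed together and masked to
 * MTK_PPE_ENTRIES - 1. Entry types without a usable tuple here return
 * MTK_PPE_HASH_MASK instead.
 */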
static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

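/* The L2 (MAC) info block and the ib2 word sit at different offsets depending
 * on whether the entry uses the IPv4 or the IPv6/tunnel layout; these helpers
 * pick the right member based on the packet type encoded in ib1.
 */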
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

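/* Initialize a FoE entry: ib1 carries the bind state, packet type and
 * UDP/TTL/cache flags, ib2 the destination PSE port, and the mac_info block
 * the rewritten source/destination MAC plus the outgoing ethertype.
 */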
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(entry);
	u32 val = *ib2;

	val &= ~MTK_FOE_IB2_DEST_PORT;
	val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	*ib2 = val;

	return 0;
}

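/* For HNAPT entries the egress flag selects the translated (new) tuple
 * instead of the original one; 6RD entries only carry the IPv4 tunnel
 * endpoints here.
 */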
int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

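/* For DS-Lite entries the IPv6 addresses describe the tunnel endpoints; for
 * native IPv6 routes they are the flow addresses, with ports only present in
 * the 5-tuple and 6RD layouts.
 */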
int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

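/* For DSA switch ports the special tag is carried in the etype field (one bit
 * per destination port), and the entry is accounted as one VLAN layer without
 * an explicit tag.
 */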
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
			   int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
	u32 *ib2 = mtk_foe_entry_ib2(entry);

	*ib2 &= ~MTK_FOE_IB2_PORT_MG;
	*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
	if (wdma_idx)
		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;

	l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

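/* Compare a software flow entry against a hardware FoE entry. Only the tuple
 * portion is compared: the length stops short of ib2, and state/timestamp
 * differences in ib1 are ignored except for the UDP bit.
 */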
static bool
mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry *hwe;
	struct mtk_foe_entry foe;

	spin_lock_bh(&ppe_lock);
	if (entry->hash == 0xffff)
		goto out;

	hwe = &ppe->foe_table[entry->hash];
	memcpy(&foe, hwe, sizeof(foe));
	if (!mtk_flow_entry_match(entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

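/* Write an entry into the hardware table. The data words are written first
 * and ib1 last, so the PPE never sees a bound ib1 in front of stale data;
 * the cache is cleared afterwards so lookups hit the new entry.
 */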
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_foe_entry *hwe;
	u16 timestamp;

	timestamp = mtk_eth_timestamp(ppe->eth);
	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hwe = &ppe->foe_table[hash];
	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		/* release the hardware slot so the PPE can age it out */
		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
							      MTK_FOE_STATE_UNBIND);
	}
	entry->hash = 0xffff;
	spin_unlock_bh(&ppe_lock);
}

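/* Committing only registers the entry in the software hash list. The hardware
 * entry is written later, from __mtk_ppe_check_skb(), once the PPE reports
 * this hash on a received packet.
 */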
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 hash = mtk_ppe_hash_entry(&entry->data);

	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

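/* Called from the RX path with the FoE hash reported by the hardware for the
 * received packet: bind the first matching software entry to this hardware
 * slot and drop stale hash references from the others.
 */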
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	struct hlist_head *head = &ppe->foe_flow[hash / 2];
	struct mtk_flow_entry *entry;
	struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
	bool found = false;

	if (hlist_empty(head))
		return;

	spin_lock_bh(&ppe_lock);
	hlist_for_each_entry(entry, head, list) {
		if (found || !mtk_flow_entry_match(entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	spin_unlock_bh(&ppe_lock);
}

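/* The hardware keeps updating the bind timestamp in ib1 as long as the flow
 * sees traffic; the difference to the current timestamp counter, with
 * wrap-around handled below, gives the idle time.
 */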
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
	u16 timestamp;

	mtk_flow_entry_update(ppe, entry);
	timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;

	if (timestamp > now)
		return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;

	return now - timestamp;
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version)
{
	struct device *dev = eth->dev;
	struct mtk_foe_entry *foe;
	struct mtk_ppe *ppe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return NULL;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return ppe;
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}

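/* Program the table base and layout, aging and keep-alive behaviour, the
 * supported flow types, and the bind thresholds/rate limits, then enable
 * the engine.
 */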
int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}

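/* Invalidate all entries, drop the cache, disable the engine and aging,
 * then wait for the table to become idle.
 */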
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}