net: ethernet: mtk_eth_soc: remove bridge flow offload type entry support
drivers/net/ethernet/mediatek/mtk_ppe.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
        writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
        return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
        u32 val;

        val = ppe_r32(ppe, reg);
        val &= ~mask;
        val |= set;
        ppe_w32(ppe, reg, val);

        return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
        return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
        return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
        return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
        int ret;
        u32 val;

        ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
                                 !(val & MTK_PPE_GLO_CFG_BUSY),
                                 20, MTK_PPE_WAIT_TIMEOUT_US);

        if (ret)
                dev_err(ppe->dev, "PPE table busy");

        return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
        ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
        ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
        mtk_ppe_cache_clear(ppe);

        ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
                enable * MTK_PPE_CACHE_CTL_EN);
}

static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
        u32 hv1, hv2, hv3;
        u32 hash;

        switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
                case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
                case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
                        hv1 = e->ipv4.orig.ports;
                        hv2 = e->ipv4.orig.dest_ip;
                        hv3 = e->ipv4.orig.src_ip;
                        break;
                case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
                case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
                        hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
                        hv1 ^= e->ipv6.ports;

                        hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
                        hv2 ^= e->ipv6.dest_ip[0];

                        hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
                        hv3 ^= e->ipv6.src_ip[0];
                        break;
                case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
                case MTK_PPE_PKT_TYPE_IPV6_6RD:
                default:
                        WARN_ON_ONCE(1);
                        return MTK_PPE_HASH_MASK;
        }

        hash = (hv1 & hv2) | ((~hv1) & hv3);
        hash = (hash >> 24) | ((hash & 0xffffff) << 8);
        hash ^= hv1 ^ hv2 ^ hv3;
        hash ^= hash >> 16;
        hash <<= 1;
        hash &= MTK_PPE_ENTRIES - 1;

        return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
        int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

        if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                return &entry->ipv6.l2;

        return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
        int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

        if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                return &entry->ipv6.ib2;

        return &entry->ipv4.ib2;
}

int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
                          u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
        struct mtk_foe_mac_info *l2;
        u32 ports_pad, val;

        memset(entry, 0, sizeof(*entry));

        val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
              FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
              FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
              MTK_FOE_IB1_BIND_TTL |
              MTK_FOE_IB1_BIND_CACHE;
        entry->ib1 = val;

        val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
              FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
              FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

        if (is_multicast_ether_addr(dest_mac))
                val |= MTK_FOE_IB2_MULTICAST;

        ports_pad = 0xa5a5a500 | (l4proto & 0xff);
        if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
                entry->ipv4.orig.ports = ports_pad;
        if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
                entry->ipv6.ports = ports_pad;

        if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
                entry->ipv6.ib2 = val;
                l2 = &entry->ipv6.l2;
        } else {
                entry->ipv4.ib2 = val;
                l2 = &entry->ipv4.l2;
        }

        l2->dest_mac_hi = get_unaligned_be32(dest_mac);
        l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
        l2->src_mac_hi = get_unaligned_be32(src_mac);
        l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

        if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
                l2->etype = ETH_P_IPV6;
        else
                l2->etype = ETH_P_IP;

        return 0;
}

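/* Illustrative usage sketch (not part of the original file): a caller such as
 * the flow offload code is expected to build a FOE entry with the helpers in
 * this file and then hand it to mtk_foe_entry_commit(). The variable names
 * below (flow, pse_port, src_mac, addresses, ports) are hypothetical; only the
 * helper functions themselves come from this file.
 *
 *      struct mtk_flow_entry *flow = kzalloc(sizeof(*flow), GFP_KERNEL);
 *
 *      mtk_foe_entry_prepare(&flow->data, MTK_PPE_PKT_TYPE_IPV4_HNAPT,
 *                            IPPROTO_TCP, pse_port, src_mac, dest_mac);
 *      mtk_foe_entry_set_ipv4_tuple(&flow->data, false,      (ingress tuple)
 *                                   src_ip, src_port, dst_ip, dst_port);
 *      mtk_foe_entry_set_ipv4_tuple(&flow->data, true,       (egress tuple)
 *                                   new_src_ip, new_src_port,
 *                                   new_dst_ip, new_dst_port);
 *      mtk_foe_entry_commit(ppe, flow);
 */
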
int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
        u32 *ib2 = mtk_foe_entry_ib2(entry);
        u32 val;

        val = *ib2;
        val &= ~MTK_FOE_IB2_DEST_PORT;
        val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
        *ib2 = val;

        return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
                                 __be32 src_addr, __be16 src_port,
                                 __be32 dest_addr, __be16 dest_port)
{
        int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
        struct mtk_ipv4_tuple *t;

        switch (type) {
        case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
                if (egress) {
                        t = &entry->ipv4.new;
                        break;
                }
                fallthrough;
        case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
        case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
                t = &entry->ipv4.orig;
                break;
        case MTK_PPE_PKT_TYPE_IPV6_6RD:
                entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
                entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
                return 0;
        default:
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        t->src_ip = be32_to_cpu(src_addr);
        t->dest_ip = be32_to_cpu(dest_addr);

        if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
                return 0;

        t->src_port = be16_to_cpu(src_port);
        t->dest_port = be16_to_cpu(dest_port);

        return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
                                 __be32 *src_addr, __be16 src_port,
                                 __be32 *dest_addr, __be16 dest_port)
{
        int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
        u32 *src, *dest;
        int i;

        switch (type) {
        case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
                src = entry->dslite.tunnel_src_ip;
                dest = entry->dslite.tunnel_dest_ip;
                break;
        case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
        case MTK_PPE_PKT_TYPE_IPV6_6RD:
                entry->ipv6.src_port = be16_to_cpu(src_port);
                entry->ipv6.dest_port = be16_to_cpu(dest_port);
                fallthrough;
        case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
                src = entry->ipv6.src_ip;
                dest = entry->ipv6.dest_ip;
                break;
        default:
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        for (i = 0; i < 4; i++)
                src[i] = be32_to_cpu(src_addr[i]);
        for (i = 0; i < 4; i++)
                dest[i] = be32_to_cpu(dest_addr[i]);

        return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

        l2->etype = BIT(port);

        if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
                entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
        else
                l2->etype |= BIT(8);

        entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

        return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

        switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
        case 0:
                entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
                              FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
                l2->vlan1 = vid;
                return 0;
        case 1:
                if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
                        l2->vlan1 = vid;
                        l2->etype |= BIT(8);
                } else {
                        l2->vlan2 = vid;
                        entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
                }
                return 0;
        default:
                return -ENOSPC;
        }
}

int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

        if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
            (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
                l2->etype = ETH_P_PPP_SES;

        entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
        l2->pppoe_id = sid;

        return 0;
}

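/* Illustrative note (not part of the original file): the L2 helpers above are
 * meant to be applied in the order the encapsulations appear on the wire,
 * i.e. mtk_foe_entry_set_dsa() when the egress path goes through a DSA CPU
 * port, one mtk_foe_entry_set_vlan() call per 802.1Q tag, and
 * mtk_foe_entry_set_pppoe() last. A hypothetical caller could look like:
 *
 *      if (dsa_port >= 0)
 *              mtk_foe_entry_set_dsa(&flow->data, dsa_port);
 *      if (vlan_id)
 *              mtk_foe_entry_set_vlan(&flow->data, vlan_id);
 *      if (pppoe_sid)
 *              mtk_foe_entry_set_pppoe(&flow->data, pppoe_sid);
 */
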
int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
                           int bss, int wcid)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
        u32 *ib2 = mtk_foe_entry_ib2(entry);

        *ib2 &= ~MTK_FOE_IB2_PORT_MG;
        *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
        if (wdma_idx)
                *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;

        l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
                    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
                    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);

        return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
        return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
               FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

static bool
mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
{
        int type, len;

        if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
                return false;

        type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
        if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                len = offsetof(struct mtk_foe_entry, ipv6._rsv);
        else
                len = offsetof(struct mtk_foe_entry, ipv4.ib2);

        return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        struct mtk_foe_entry *hwe;
        struct mtk_foe_entry foe;

        spin_lock_bh(&ppe_lock);
        if (entry->hash == 0xffff)
                goto out;

        hwe = &ppe->foe_table[entry->hash];
        memcpy(&foe, hwe, sizeof(foe));
        if (!mtk_flow_entry_match(entry, &foe)) {
                entry->hash = 0xffff;
                goto out;
        }

        entry->data.ib1 = foe.ib1;

out:
        spin_unlock_bh(&ppe_lock);
}

static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
                       u16 hash)
{
        struct mtk_foe_entry *hwe;
        u16 timestamp;

        timestamp = mtk_eth_timestamp(ppe->eth);
        timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
        entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
        entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

        hwe = &ppe->foe_table[hash];
        memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
        wmb();
        hwe->ib1 = entry->ib1;

        dma_wmb();

        mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        spin_lock_bh(&ppe_lock);
        hlist_del_init(&entry->list);
        if (entry->hash != 0xffff) {
                ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
                ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
                                                              MTK_FOE_STATE_BIND);
                dma_wmb();
        }
        entry->hash = 0xffff;
        spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        u32 hash = mtk_ppe_hash_entry(&entry->data);

        entry->hash = 0xffff;
        spin_lock_bh(&ppe_lock);
        hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
        spin_unlock_bh(&ppe_lock);

        return 0;
}

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
        struct hlist_head *head = &ppe->foe_flow[hash / 2];
        struct mtk_flow_entry *entry;
        struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
        bool found = false;

        if (hlist_empty(head))
                return;

        spin_lock_bh(&ppe_lock);
        hlist_for_each_entry(entry, head, list) {
                if (found || !mtk_flow_entry_match(entry, hwe)) {
                        if (entry->hash != 0xffff)
                                entry->hash = 0xffff;
                        continue;
                }

                entry->hash = hash;
                __mtk_foe_entry_commit(ppe, &entry->data, hash);
                found = true;
        }
        spin_unlock_bh(&ppe_lock);
}

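/* Illustrative usage sketch (not part of the original file): the RX path is
 * expected to call __mtk_ppe_check_skb() with the FOE hash that the hardware
 * reports in the RX descriptor, so that a software flow whose hardware slot is
 * not programmed yet (entry->hash == 0xffff) gets bound once matching traffic
 * is seen. The descriptor accessor below is hypothetical:
 *
 *      u16 foe_hash = mtk_rxd_get_foe_hash(rxd);
 *
 *      if (foe_hash != 0xffff)
 *              __mtk_ppe_check_skb(eth->ppe, skb, foe_hash);
 */
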
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
        u16 timestamp;

        mtk_flow_entry_update(ppe, entry);
        timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;

        if (timestamp > now)
                return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
        else
                return now - timestamp;
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
                             int version)
{
        struct device *dev = eth->dev;
        struct mtk_foe_entry *foe;
        struct mtk_ppe *ppe;

        ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
        if (!ppe)
                return NULL;

        /* need to allocate a separate device, since PPE DMA access is
         * not coherent.
         */
        ppe->base = base;
        ppe->eth = eth;
        ppe->dev = dev;
        ppe->version = version;

        foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
                                  &ppe->foe_phys, GFP_KERNEL);
        if (!foe)
                return NULL;

        ppe->foe_table = foe;

        mtk_ppe_debugfs_init(ppe);

        return ppe;
}

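/* Illustrative lifecycle sketch (not part of the original file): the ethernet
 * driver is expected to create the PPE once at probe time and start/stop it
 * together with the hardware. The register offset and version argument below
 * are assumptions, not taken from this file:
 *
 *      eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
 *      ...
 *      mtk_ppe_start(eth->ppe);        (when the interface is brought up)
 *      ...
 *      mtk_ppe_stop(eth->ppe);         (on teardown)
 */
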
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
        static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
        int i, k;

        memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

        if (!IS_ENABLED(CONFIG_SOC_MT7621))
                return;

        /* skip all entries that cross the 1024 byte boundary */
        for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
                for (k = 0; k < ARRAY_SIZE(skip); k++)
                        ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}

int mtk_ppe_start(struct mtk_ppe *ppe)
{
        u32 val;

        mtk_ppe_init_foe_table(ppe);
        ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

        val = MTK_PPE_TB_CFG_ENTRY_80B |
              MTK_PPE_TB_CFG_AGE_NON_L4 |
              MTK_PPE_TB_CFG_AGE_UNBIND |
              MTK_PPE_TB_CFG_AGE_TCP |
              MTK_PPE_TB_CFG_AGE_UDP |
              MTK_PPE_TB_CFG_AGE_TCP_FIN |
              FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
                         MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
              FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
                         MTK_PPE_KEEPALIVE_DISABLE) |
              FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
              FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
                         MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
              FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
                         MTK_PPE_ENTRIES_SHIFT);
        ppe_w32(ppe, MTK_PPE_TB_CFG, val);

        ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
                MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

        mtk_ppe_cache_enable(ppe, true);

        val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
              MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
              MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
              MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
              MTK_PPE_FLOW_CFG_IP6_6RD |
              MTK_PPE_FLOW_CFG_IP4_NAT |
              MTK_PPE_FLOW_CFG_IP4_NAPT |
              MTK_PPE_FLOW_CFG_IP4_DSLITE |
              MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
        ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

        val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
              FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
        ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

        val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
              FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
        ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

        val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
              FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
        ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

        val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
        ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

        val = MTK_PPE_BIND_LIMIT1_FULL |
              FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
        ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

        val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
              FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
        ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

        /* enable PPE */
        val = MTK_PPE_GLO_CFG_EN |
              MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
              MTK_PPE_GLO_CFG_IP4_CS_DROP |
              MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
        ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

        ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

        return 0;
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
        u32 val;
        int i;

        for (i = 0; i < MTK_PPE_ENTRIES; i++)
                ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
                                                   MTK_FOE_STATE_INVALID);

        mtk_ppe_cache_enable(ppe, false);

        /* disable offload engine */
        ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
        ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

        /* disable aging */
        val = MTK_PPE_TB_CFG_AGE_NON_L4 |
              MTK_PPE_TB_CFG_AGE_UNBIND |
              MTK_PPE_TB_CFG_AGE_TCP |
              MTK_PPE_TB_CFG_AGE_UDP |
              MTK_PPE_TB_CFG_AGE_TCP_FIN;
        ppe_clear(ppe, MTK_PPE_TB_CFG, val);

        return mtk_ppe_wait_busy(ppe);
}