// SPDX-License-Identifier: GPL-2.0
/*
 * Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
21 /* Update parser tcam and sram hw entries */
22 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
26 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
29 /* Clear entry invalidation bit */
30 pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
32 /* Write sram index - indirect access */
33 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
34 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
35 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
37 /* Write tcam index - indirect access */
38 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
39 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
40 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
45 /* Initialize tcam entry from hw */
46 int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
51 if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
54 memset(pe, 0, sizeof(*pe));
57 /* Write tcam index - indirect access */
58 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
60 pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
61 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
62 if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
63 return MVPP2_PRS_TCAM_ENTRY_INVALID;
65 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
66 pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
68 /* Write sram index - indirect access */
69 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
70 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
71 pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
76 /* Invalidate tcam hw entry */
77 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
79 /* Write index - indirect access */
80 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
81 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
82 MVPP2_PRS_TCAM_INV_MASK);
85 /* Enable shadow table entry and set its lookup ID */
86 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
88 priv->prs_shadow[index].valid = true;
89 priv->prs_shadow[index].lu = lu;
92 /* Update ri fields in shadow table entry */
93 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
94 unsigned int ri, unsigned int ri_mask)
96 priv->prs_shadow[index].ri_mask = ri_mask;
97 priv->prs_shadow[index].ri = ri;
100 /* Update lookup field in tcam sw entry */
101 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
103 pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
104 pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
105 pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
106 pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
109 /* Update mask for single port in tcam sw entry */
110 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
111 unsigned int port, bool add)
114 pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
116 pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
119 /* Update port map in tcam sw entry */
120 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
123 pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
124 pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
125 pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
128 /* Obtain port map from tcam sw entry */
129 unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
131 return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
134 /* Set byte of data and its enable bits in tcam sw entry */
135 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
136 unsigned int offs, unsigned char byte,
137 unsigned char enable)
139 int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
141 pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
142 pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
143 pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
144 pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
147 /* Get byte of data and its enable bits from tcam sw entry */
148 void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
149 unsigned int offs, unsigned char *byte,
150 unsigned char *enable)
152 int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
154 *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
155 *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
158 /* Compare tcam data bytes with a pattern */
159 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
164 tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
165 return tcam_data == data;
168 /* Update ai bits in tcam sw entry */
169 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
170 unsigned int bits, unsigned int enable)
174 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
175 if (!(enable & BIT(i)))
179 pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
181 pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
184 pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
187 /* Get ai bits from tcam sw entry */
188 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
190 return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
193 /* Set ethertype in tcam sw entry */
194 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
195 unsigned short ethertype)
197 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
198 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
201 /* Set vid in tcam sw entry */
202 static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
205 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
206 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
209 /* Set bits in sram sw entry */
210 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
213 pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
216 /* Clear bits in sram sw entry */
217 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
220 pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
223 /* Update ri bits in sram sw entry */
224 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
225 unsigned int bits, unsigned int mask)
229 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
230 if (!(mask & BIT(i)))
234 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
237 mvpp2_prs_sram_bits_clear(pe,
238 MVPP2_PRS_SRAM_RI_OFFS + i,
241 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
245 /* Obtain ri bits from sram sw entry */
246 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
248 return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
251 /* Update ai bits in sram sw entry */
252 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
253 unsigned int bits, unsigned int mask)
257 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
258 if (!(mask & BIT(i)))
262 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
265 mvpp2_prs_sram_bits_clear(pe,
266 MVPP2_PRS_SRAM_AI_OFFS + i,
269 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
273 /* Read ai bits from sram sw entry */
274 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
277 /* ai is stored on bits 90->97; so it spreads across two u32 */
278 int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
279 int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
281 bits = (pe->sram[ai_off] >> ai_shift) |
282 (pe->sram[ai_off + 1] << (32 - ai_shift));
287 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
290 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
293 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
295 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
296 MVPP2_PRS_SRAM_NEXT_LU_MASK);
297 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
300 /* In the sram sw entry set sign and value of the next lookup offset
301 * and the offset value generated to the classifier
303 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
308 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
311 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
315 pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
316 shift & MVPP2_PRS_SRAM_SHIFT_MASK;
318 /* Reset and set operation */
319 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
320 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
321 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
323 /* Set base offset as current */
324 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
327 /* In the sram sw entry set sign and value of the user defined offset
328 * generated to the classifier
330 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
331 unsigned int type, int offset,
336 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
339 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
343 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
344 MVPP2_PRS_SRAM_UDF_MASK);
345 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
346 offset & MVPP2_PRS_SRAM_UDF_MASK);
348 /* Set offset type */
349 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
350 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
351 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
353 /* Set offset operation */
354 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
355 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
356 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
357 op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
359 /* Set base offset as current */
360 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
363 /* Find parser flow entry */
364 static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
366 struct mvpp2_prs_entry pe;
369 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
370 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
373 if (!priv->prs_shadow[tid].valid ||
374 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
377 mvpp2_prs_init_from_hw(priv, &pe, tid);
378 bits = mvpp2_prs_sram_ai_get(&pe);
380 /* Sram store classification lookup ID in AI bits [5:0] */
381 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
388 /* Return first free tcam index, seeking from start to end */
389 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
397 for (tid = start; tid <= end; tid++) {
398 if (!priv->prs_shadow[tid].valid)
405 /* Drop flow control pause frames */
406 static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
408 unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
409 struct mvpp2_prs_entry pe;
412 memset(&pe, 0, sizeof(pe));
414 /* For all ports - drop flow control frames */
415 pe.index = MVPP2_PE_FC_DROP;
416 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
418 /* Set match on DA */
421 mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
423 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
424 MVPP2_PRS_RI_DROP_MASK);
426 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
427 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
430 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
432 /* Update shadow table and hw entry */
433 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
434 mvpp2_prs_hw_write(priv, &pe);
437 /* Enable/disable dropping all mac da's */
438 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
440 struct mvpp2_prs_entry pe;
442 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
443 /* Entry exist - update port only */
444 mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
446 /* Entry doesn't exist - create new */
447 memset(&pe, 0, sizeof(pe));
448 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
449 pe.index = MVPP2_PE_DROP_ALL;
451 /* Non-promiscuous mode for all ports - DROP unknown packets */
452 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
453 MVPP2_PRS_RI_DROP_MASK);
455 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
456 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
458 /* Update shadow table */
459 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
462 mvpp2_prs_tcam_port_map_set(&pe, 0);
465 /* Update port mask */
466 mvpp2_prs_tcam_port_set(&pe, port, add);
468 mvpp2_prs_hw_write(priv, &pe);
471 /* Set port to unicast or multicast promiscuous mode */
472 void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
473 enum mvpp2_prs_l2_cast l2_cast, bool add)
475 struct mvpp2_prs_entry pe;
476 unsigned char cast_match;
480 if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
481 cast_match = MVPP2_PRS_UCAST_VAL;
482 tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
483 ri = MVPP2_PRS_RI_L2_UCAST;
485 cast_match = MVPP2_PRS_MCAST_VAL;
486 tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
487 ri = MVPP2_PRS_RI_L2_MCAST;
490 /* promiscuous mode - Accept unknown unicast or multicast packets */
491 if (priv->prs_shadow[tid].valid) {
492 mvpp2_prs_init_from_hw(priv, &pe, tid);
494 memset(&pe, 0, sizeof(pe));
495 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
498 /* Continue - set next lookup */
499 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
501 /* Set result info bits */
502 mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
504 /* Match UC or MC addresses */
505 mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
506 MVPP2_PRS_CAST_MASK);
508 /* Shift to ethertype */
509 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
510 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
513 mvpp2_prs_tcam_port_map_set(&pe, 0);
515 /* Update shadow table */
516 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
519 /* Update port mask */
520 mvpp2_prs_tcam_port_set(&pe, port, add);
522 mvpp2_prs_hw_write(priv, &pe);
525 /* Set entry for dsa packets */
526 static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
527 bool tagged, bool extend)
529 struct mvpp2_prs_entry pe;
533 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
536 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
540 if (priv->prs_shadow[tid].valid) {
541 /* Entry exist - update port only */
542 mvpp2_prs_init_from_hw(priv, &pe, tid);
544 /* Entry doesn't exist - create new */
545 memset(&pe, 0, sizeof(pe));
546 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
549 /* Update shadow table */
550 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
553 /* Set tagged bit in DSA tag */
554 mvpp2_prs_tcam_data_byte_set(&pe, 0,
555 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
556 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
558 /* Set ai bits for next iteration */
560 mvpp2_prs_sram_ai_update(&pe, 1,
561 MVPP2_PRS_SRAM_AI_MASK);
563 mvpp2_prs_sram_ai_update(&pe, 0,
564 MVPP2_PRS_SRAM_AI_MASK);
566 /* Set result info bits to 'single vlan' */
567 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
568 MVPP2_PRS_RI_VLAN_MASK);
569 /* If packet is tagged continue check vid filtering */
570 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
572 /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
573 mvpp2_prs_sram_shift_set(&pe, shift,
574 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
576 /* Set result info bits to 'no vlans' */
577 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
578 MVPP2_PRS_RI_VLAN_MASK);
579 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
583 mvpp2_prs_tcam_port_map_set(&pe, 0);
586 /* Update port mask */
587 mvpp2_prs_tcam_port_set(&pe, port, add);
589 mvpp2_prs_hw_write(priv, &pe);
592 /* Set entry for dsa ethertype */
593 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
594 bool add, bool tagged, bool extend)
596 struct mvpp2_prs_entry pe;
597 int tid, shift, port_mask;
600 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
601 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
605 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
606 MVPP2_PE_ETYPE_DSA_UNTAGGED;
607 port_mask = MVPP2_PRS_PORT_MASK;
611 if (priv->prs_shadow[tid].valid) {
612 /* Entry exist - update port only */
613 mvpp2_prs_init_from_hw(priv, &pe, tid);
615 /* Entry doesn't exist - create new */
616 memset(&pe, 0, sizeof(pe));
617 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
621 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
622 mvpp2_prs_match_etype(&pe, 2, 0);
624 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
625 MVPP2_PRS_RI_DSA_MASK);
626 /* Shift ethertype + 2 byte reserved + tag*/
627 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
628 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
630 /* Update shadow table */
631 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
634 /* Set tagged bit in DSA tag */
635 mvpp2_prs_tcam_data_byte_set(&pe,
636 MVPP2_ETH_TYPE_LEN + 2 + 3,
637 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
638 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
639 /* Clear all ai bits for next iteration */
640 mvpp2_prs_sram_ai_update(&pe, 0,
641 MVPP2_PRS_SRAM_AI_MASK);
642 /* If packet is tagged continue check vlans */
643 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
645 /* Set result info bits to 'no vlans' */
646 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
647 MVPP2_PRS_RI_VLAN_MASK);
648 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
650 /* Mask/unmask all ports, depending on dsa type */
651 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
654 /* Update port mask */
655 mvpp2_prs_tcam_port_set(&pe, port, add);
657 mvpp2_prs_hw_write(priv, &pe);
660 /* Search for existing single/triple vlan entry */
661 static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
663 struct mvpp2_prs_entry pe;
666 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
667 for (tid = MVPP2_PE_FIRST_FREE_TID;
668 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
669 unsigned int ri_bits, ai_bits;
672 if (!priv->prs_shadow[tid].valid ||
673 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
676 mvpp2_prs_init_from_hw(priv, &pe, tid);
677 match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
682 ri_bits = mvpp2_prs_sram_ri_get(&pe);
683 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
685 /* Get current ai value from tcam */
686 ai_bits = mvpp2_prs_tcam_ai_get(&pe);
687 /* Clear double vlan bit */
688 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
693 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
694 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
701 /* Add/update single/triple vlan entry */
702 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
703 unsigned int port_map)
705 struct mvpp2_prs_entry pe;
709 memset(&pe, 0, sizeof(pe));
711 tid = mvpp2_prs_vlan_find(priv, tpid, ai);
714 /* Create new tcam entry */
715 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
716 MVPP2_PE_FIRST_FREE_TID);
720 /* Get last double vlan tid */
721 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
722 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
723 unsigned int ri_bits;
725 if (!priv->prs_shadow[tid_aux].valid ||
726 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
729 mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
730 ri_bits = mvpp2_prs_sram_ri_get(&pe);
731 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
732 MVPP2_PRS_RI_VLAN_DOUBLE)
739 memset(&pe, 0, sizeof(pe));
741 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
743 mvpp2_prs_match_etype(&pe, 0, tpid);
745 /* VLAN tag detected, proceed with VID filtering */
746 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
748 /* Clear all ai bits for next iteration */
749 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
751 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
752 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
753 MVPP2_PRS_RI_VLAN_MASK);
755 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
756 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
757 MVPP2_PRS_RI_VLAN_MASK);
759 mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
761 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
763 mvpp2_prs_init_from_hw(priv, &pe, tid);
765 /* Update ports' mask */
766 mvpp2_prs_tcam_port_map_set(&pe, port_map);
768 mvpp2_prs_hw_write(priv, &pe);
773 /* Get first free double vlan ai number */
774 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
778 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
779 if (!priv->prs_double_vlans[i])
786 /* Search for existing double vlan entry */
787 static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
788 unsigned short tpid2)
790 struct mvpp2_prs_entry pe;
793 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
794 for (tid = MVPP2_PE_FIRST_FREE_TID;
795 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
796 unsigned int ri_mask;
799 if (!priv->prs_shadow[tid].valid ||
800 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
803 mvpp2_prs_init_from_hw(priv, &pe, tid);
805 match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
806 mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
811 ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
812 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
819 /* Add or update double vlan entry */
820 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
821 unsigned short tpid2,
822 unsigned int port_map)
824 int tid_aux, tid, ai, ret = 0;
825 struct mvpp2_prs_entry pe;
827 memset(&pe, 0, sizeof(pe));
829 tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
832 /* Create new tcam entry */
833 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
834 MVPP2_PE_LAST_FREE_TID);
838 /* Set ai value for new double vlan entry */
839 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
843 /* Get first single/triple vlan tid */
844 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
845 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
846 unsigned int ri_bits;
848 if (!priv->prs_shadow[tid_aux].valid ||
849 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
852 mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
853 ri_bits = mvpp2_prs_sram_ri_get(&pe);
854 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
855 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
856 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
863 memset(&pe, 0, sizeof(pe));
864 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
867 priv->prs_double_vlans[ai] = true;
869 mvpp2_prs_match_etype(&pe, 0, tpid1);
870 mvpp2_prs_match_etype(&pe, 4, tpid2);
872 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
873 /* Shift 4 bytes - skip outer vlan tag */
874 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
875 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
876 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
877 MVPP2_PRS_RI_VLAN_MASK);
878 mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
879 MVPP2_PRS_SRAM_AI_MASK);
881 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
883 mvpp2_prs_init_from_hw(priv, &pe, tid);
886 /* Update ports' mask */
887 mvpp2_prs_tcam_port_map_set(&pe, port_map);
888 mvpp2_prs_hw_write(priv, &pe);
893 /* IPv4 header parsing for fragmentation and L4 offset */
894 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
895 unsigned int ri, unsigned int ri_mask)
897 struct mvpp2_prs_entry pe;
900 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
901 (proto != IPPROTO_IGMP))
904 /* Not fragmented packet */
905 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
906 MVPP2_PE_LAST_FREE_TID);
910 memset(&pe, 0, sizeof(pe));
911 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
914 /* Finished: go to flowid generation */
915 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
916 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
919 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
920 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
921 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
922 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
924 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
925 MVPP2_PRS_TCAM_PROTO_MASK_L);
926 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
927 MVPP2_PRS_TCAM_PROTO_MASK);
929 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
930 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
931 MVPP2_PRS_IPV4_DIP_AI_BIT);
932 /* Unmask all ports */
933 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
935 /* Update shadow table and hw entry */
936 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
937 mvpp2_prs_hw_write(priv, &pe);
939 /* Fragmented packet */
940 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
941 MVPP2_PE_LAST_FREE_TID);
946 /* Clear ri before updating */
947 pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
948 pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
949 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
951 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
952 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
954 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
955 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
957 /* Update shadow table and hw entry */
958 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
959 mvpp2_prs_hw_write(priv, &pe);
964 /* IPv4 L3 multicast or broadcast */
965 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
967 struct mvpp2_prs_entry pe;
970 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
971 MVPP2_PE_LAST_FREE_TID);
975 memset(&pe, 0, sizeof(pe));
976 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
980 case MVPP2_PRS_L3_MULTI_CAST:
981 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
982 MVPP2_PRS_IPV4_MC_MASK);
983 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
984 MVPP2_PRS_RI_L3_ADDR_MASK);
986 case MVPP2_PRS_L3_BROAD_CAST:
987 mask = MVPP2_PRS_IPV4_BC_MASK;
988 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
989 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
990 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
991 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
992 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
993 MVPP2_PRS_RI_L3_ADDR_MASK);
999 /* Go again to ipv4 */
1000 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1002 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1003 MVPP2_PRS_IPV4_DIP_AI_BIT);
1005 /* Shift back to IPv4 proto */
1006 mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1008 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1010 /* Unmask all ports */
1011 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1013 /* Update shadow table and hw entry */
1014 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1015 mvpp2_prs_hw_write(priv, &pe);
1020 /* Set entries for protocols over IPv6 */
1021 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
1022 unsigned int ri, unsigned int ri_mask)
1024 struct mvpp2_prs_entry pe;
1027 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1028 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
1031 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1032 MVPP2_PE_LAST_FREE_TID);
1036 memset(&pe, 0, sizeof(pe));
1037 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1040 /* Finished: go to flowid generation */
1041 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1042 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1043 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1044 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1045 sizeof(struct ipv6hdr) - 6,
1046 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1048 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1049 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1050 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1051 /* Unmask all ports */
1052 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1055 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1056 mvpp2_prs_hw_write(priv, &pe);
1061 /* IPv6 L3 multicast entry */
1062 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
1064 struct mvpp2_prs_entry pe;
1067 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
1070 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1071 MVPP2_PE_LAST_FREE_TID);
1075 memset(&pe, 0, sizeof(pe));
1076 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1079 /* Finished: go to flowid generation */
1080 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1081 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
1082 MVPP2_PRS_RI_L3_ADDR_MASK);
1083 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1084 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1085 /* Shift back to IPv6 NH */
1086 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1088 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
1089 MVPP2_PRS_IPV6_MC_MASK);
1090 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1091 /* Unmask all ports */
1092 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1094 /* Update shadow table and hw entry */
1095 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1096 mvpp2_prs_hw_write(priv, &pe);
1101 /* Parser per-port initialization */
1102 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1103 int lu_max, int offset)
1108 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1109 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1110 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1111 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1113 /* Set maximum number of loops for packet received from port */
1114 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1115 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1116 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1117 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1119 /* Set initial offset for packet header extraction for the first
1122 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1123 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1124 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1125 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1128 /* Default flow entries initialization for all ports */
1129 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1131 struct mvpp2_prs_entry pe;
1134 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1135 memset(&pe, 0, sizeof(pe));
1136 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1137 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1139 /* Mask all ports */
1140 mvpp2_prs_tcam_port_map_set(&pe, 0);
1143 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1144 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1146 /* Update shadow table and hw entry */
1147 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1148 mvpp2_prs_hw_write(priv, &pe);
1152 /* Set default entry for Marvell Header field */
1153 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1155 struct mvpp2_prs_entry pe;
1157 memset(&pe, 0, sizeof(pe));
1159 pe.index = MVPP2_PE_MH_DEFAULT;
1160 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1161 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1162 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1163 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1165 /* Unmask all ports */
1166 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1168 /* Update shadow table and hw entry */
1169 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1170 mvpp2_prs_hw_write(priv, &pe);
1172 /* Set MH entry that skip parser */
1173 pe.index = MVPP2_PE_MH_SKIP_PRS;
1174 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1175 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1176 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1177 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1178 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1180 /* Mask all ports */
1181 mvpp2_prs_tcam_port_map_set(&pe, 0);
1183 /* Update shadow table and hw entry */
1184 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1185 mvpp2_prs_hw_write(priv, &pe);
1188 /* Set default entires (place holder) for promiscuous, non-promiscuous and
1189 * multicast MAC addresses
/* NOTE(review): excerpt is missing braces/blank lines — verify vs full file. */
1191 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1193 struct mvpp2_prs_entry pe;
1195 memset(&pe, 0, sizeof(pe));
1197 /* Non-promiscuous mode for all ports - DROP unknown packets */
1198 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1199 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
/* Mark the result info as DROP, then finish: generate flow id next */
1201 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1202 MVPP2_PRS_RI_DROP_MASK);
1203 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1204 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1206 /* Unmask all ports */
1207 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1209 /* Update shadow table and hw entry */
1210 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1211 mvpp2_prs_hw_write(priv, &pe);
1213 /* Create dummy entries for drop all and promiscuous modes */
1214 mvpp2_prs_drop_fc(priv);
1215 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1216 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
1217 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
1220 /* Set default entries for various types of dsa packets */
/* NOTE(review): excerpt drops lines (braces, blank lines, possibly trailing
 * arguments of some calls) — verify against the complete file.
 */
1221 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
1223 struct mvpp2_prs_entry pe;
1225 /* None tagged EDSA entry - place holder */
1226 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1229 /* Tagged EDSA entry - place holder */
1230 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1232 /* None tagged DSA entry - place holder */
1233 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1236 /* Tagged DSA entry - place holder */
1237 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1239 /* None tagged EDSA ethertype entry - place holder*/
1240 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1241 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
1243 /* Tagged EDSA ethertype entry - place holder*/
1244 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1245 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1247 /* None tagged DSA ethertype entry */
1248 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1249 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
1251 /* Tagged DSA ethertype entry */
1252 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1253 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1255 /* Set default entry, in case DSA or EDSA tag not found */
1256 memset(&pe, 0, sizeof(pe));
1257 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1258 pe.index = MVPP2_PE_DSA_DEFAULT;
1259 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
/* Zero shift: no DSA tag present, so nothing to skip */
1262 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* NOTE(review): shadow lu is recorded as LU_MAC for this LU_DSA entry —
 * looks inconsistent; confirm this is intentional before "fixing".
 */
1263 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1265 /* Clear all sram ai bits for next iteration */
1266 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1268 /* Unmask all ports */
1269 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1271 mvpp2_prs_hw_write(priv, &pe);
1274 /* Initialize parser entries for VID filtering */
/* NOTE(review): excerpt is missing braces/blank lines — verify vs full file. */
1275 static void mvpp2_prs_vid_init(struct mvpp2 *priv)
1277 struct mvpp2_prs_entry pe;
1279 memset(&pe, 0, sizeof(pe));
1281 /* Set default vid entry */
1282 pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
1283 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
/* Match only when the EDSA AI bit is clear (regular DSA / single tag) */
1285 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
1287 /* Skip VLAN header - Set offset to 4 bytes */
1288 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
1289 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1291 /* Clear all ai bits for next iteration */
1292 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1294 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1296 /* Unmask all ports */
1297 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1299 /* Update shadow table and hw entry */
1300 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1301 mvpp2_prs_hw_write(priv, &pe);
1303 /* Set default vid entry for extended DSA*/
1304 memset(&pe, 0, sizeof(pe));
1306 /* Set default vid entry */
1307 pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
1308 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
/* Match only when the EDSA AI bit is set (extended DSA tag: 8 bytes) */
1310 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
1311 MVPP2_PRS_EDSA_VID_AI_BIT);
1313 /* Skip VLAN header - Set offset to 8 bytes */
1314 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
1315 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1317 /* Clear all ai bits for next iteration */
1318 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1320 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1322 /* Unmask all ports */
1323 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1325 /* Update shadow table and hw entry */
1326 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1327 mvpp2_prs_hw_write(priv, &pe);
1330 /* Match basic ethertypes */
/* NOTE(review): this excerpt drops many lines — the 'tid'/'ihl' declarations,
 * the 'if (tid < 0) return tid;' checks and 'pe.index = tid;' assignments
 * that normally follow each mvpp2_prs_tcam_first_free() call, blank lines and
 * closing braces. Verify against the complete file before editing.
 */
1331 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1333 struct mvpp2_prs_entry pe;
1336 /* Ethertype: PPPoE */
1337 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1338 MVPP2_PE_LAST_FREE_TID);
1342 memset(&pe, 0, sizeof(pe));
1343 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1346 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
/* Skip the PPPoE header and hand over to the PPPoE lookup stage */
1348 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1349 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1350 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1351 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1352 MVPP2_PRS_RI_PPPOE_MASK);
1354 /* Update shadow table and hw entry */
1355 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1356 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1357 priv->prs_shadow[pe.index].finish = false;
1358 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1359 MVPP2_PRS_RI_PPPOE_MASK);
1360 mvpp2_prs_hw_write(priv, &pe);
1362 /* Ethertype: ARP */
1363 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1364 MVPP2_PE_LAST_FREE_TID);
1368 memset(&pe, 0, sizeof(pe));
1369 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1372 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
1374 /* Generate flow in the next iteration*/
1375 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1376 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1377 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1378 MVPP2_PRS_RI_L3_PROTO_MASK);
/* Record L3 offset (offset value line appears stripped in this excerpt) */
1380 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1382 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1384 /* Update shadow table and hw entry */
1385 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1386 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1387 priv->prs_shadow[pe.index].finish = true;
1388 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1389 MVPP2_PRS_RI_L3_PROTO_MASK);
1390 mvpp2_prs_hw_write(priv, &pe);
1392 /* Ethertype: LBTD */
1393 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1394 MVPP2_PE_LAST_FREE_TID);
1398 memset(&pe, 0, sizeof(pe));
1399 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1402 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1404 /* Generate flow in the next iteration*/
1405 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1406 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
/* Loopback test frames: mark with the RX-special CPU code + UDF3 */
1407 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1408 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1409 MVPP2_PRS_RI_CPU_CODE_MASK |
1410 MVPP2_PRS_RI_UDF3_MASK);
1412 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1414 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1416 /* Update shadow table and hw entry */
1417 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1418 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1419 priv->prs_shadow[pe.index].finish = true;
1420 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1421 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1422 MVPP2_PRS_RI_CPU_CODE_MASK |
1423 MVPP2_PRS_RI_UDF3_MASK);
1424 mvpp2_prs_hw_write(priv, &pe);
1426 /* Ethertype: IPv4 with header length >= 5 */
/* One TCAM entry per legal IHL value (5..15), each matching the first
 * byte after the ethertype: version nibble 4 plus that IHL.
 */
1427 for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
1428 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1429 MVPP2_PE_LAST_FREE_TID);
1433 memset(&pe, 0, sizeof(pe));
1434 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1437 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
1438 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1439 MVPP2_PRS_IPV4_HEAD | ihl,
1440 MVPP2_PRS_IPV4_HEAD_MASK |
1441 MVPP2_PRS_IPV4_IHL_MASK);
1443 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1444 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1445 MVPP2_PRS_RI_L3_PROTO_MASK);
1446 /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
1447 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
1448 sizeof(struct iphdr) - 4,
1449 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* L4 offset = ethertype + actual header length (ihl words * 4 bytes) */
1451 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1452 MVPP2_ETH_TYPE_LEN + (ihl * 4),
1453 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1455 /* Update shadow table and hw entry */
1456 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1457 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1458 priv->prs_shadow[pe.index].finish = false;
1459 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1460 MVPP2_PRS_RI_L3_PROTO_MASK);
1461 mvpp2_prs_hw_write(priv, &pe);
1464 /* Ethertype: IPv6 without options */
1465 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1466 MVPP2_PE_LAST_FREE_TID);
1470 memset(&pe, 0, sizeof(pe));
1471 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1474 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
1476 /* Skip DIP of IPV6 header */
/* Shift = ethertype + 8 bytes (ver/class/flow/len/NH/hop) + one address */
1477 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1478 MVPP2_MAX_L3_ADDR_SIZE,
1479 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1480 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1481 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1482 MVPP2_PRS_RI_L3_PROTO_MASK);
1484 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1486 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1488 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1489 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1490 priv->prs_shadow[pe.index].finish = false;
1491 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1492 MVPP2_PRS_RI_L3_PROTO_MASK);
1493 mvpp2_prs_hw_write(priv, &pe);
1495 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1496 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1497 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1498 pe.index = MVPP2_PE_ETH_TYPE_UN;
1500 /* Unmask all ports */
1501 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1503 /* Generate flow in the next iteration*/
1504 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1505 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1506 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1507 MVPP2_PRS_RI_L3_PROTO_MASK);
1508 /* Set L3 offset even it's unknown L3 */
1509 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1511 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1513 /* Update shadow table and hw entry */
1514 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1515 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1516 priv->prs_shadow[pe.index].finish = true;
1517 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1518 MVPP2_PRS_RI_L3_PROTO_MASK);
1519 mvpp2_prs_hw_write(priv, &pe);
1524 /* Configure vlan entries and detect up to 2 successive VLAN tags.
/* NOTE(review): excerpt drops lines (declarations, error-check branches after
 * each add call, braces) — verify against the complete file.
 */
1531 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1533 struct mvpp2_prs_entry pe;
/* NOTE(review): devm_kcalloc's documented order is (dev, n, size, gfp);
 * here sizeof(bool) is passed as n and the count as size. The allocated
 * byte count is the same, but confirm intent before changing.
 */
1536 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
1537 MVPP2_PRS_DBL_VLANS_MAX,
1539 if (!priv->prs_double_vlans)
1542 /* Double VLAN: 0x8100, 0x88A8 */
1543 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1544 MVPP2_PRS_PORT_MASK);
1548 /* Double VLAN: 0x8100, 0x8100 */
1549 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1550 MVPP2_PRS_PORT_MASK);
1554 /* Single VLAN: 0x88a8 */
1555 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1556 MVPP2_PRS_PORT_MASK);
1560 /* Single VLAN: 0x8100 */
1561 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1562 MVPP2_PRS_PORT_MASK);
1566 /* Set default double vlan entry */
1567 memset(&pe, 0, sizeof(pe));
1568 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1569 pe.index = MVPP2_PE_VLAN_DBL;
1571 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1573 /* Clear ai for next iterations */
1574 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1575 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1576 MVPP2_PRS_RI_VLAN_MASK);
/* Matched only after a first VLAN pass set the double-vlan AI bit */
1578 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1579 MVPP2_PRS_DBL_VLAN_AI_BIT);
1580 /* Unmask all ports */
1581 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1583 /* Update shadow table and hw entry */
1584 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1585 mvpp2_prs_hw_write(priv, &pe);
1587 /* Set default vlan none entry */
1588 memset(&pe, 0, sizeof(pe));
1589 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1590 pe.index = MVPP2_PE_VLAN_NONE;
1592 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1593 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1594 MVPP2_PRS_RI_VLAN_MASK);
1596 /* Unmask all ports */
1597 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1599 /* Update shadow table and hw entry */
1600 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1601 mvpp2_prs_hw_write(priv, &pe);
1606 /* Set entries for PPPoE ethertype */
/* NOTE(review): excerpt drops lines ('tid' declaration, 'if (tid < 0)' checks,
 * 'pe.index = tid;' assignments, braces) — verify against the complete file.
 */
1607 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
1609 struct mvpp2_prs_entry pe;
1612 /* IPv4 over PPPoE with options */
1613 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1614 MVPP2_PE_LAST_FREE_TID);
1618 memset(&pe, 0, sizeof(pe));
1619 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1622 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
1624 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1625 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1626 MVPP2_PRS_RI_L3_PROTO_MASK);
1627 /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
1628 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
1629 sizeof(struct iphdr) - 4,
1630 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1632 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1634 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1636 /* Update shadow table and hw entry */
1637 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1638 mvpp2_prs_hw_write(priv, &pe);
1640 /* IPv4 over PPPoE without options */
/* This entry deliberately reuses the previous 'pe' (no memset): it keeps the
 * PPP_IP match/shift/offset and only narrows the TCAM to IHL==5 and rewrites
 * the result info from IP4_OPT to plain IP4.
 */
1641 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1642 MVPP2_PE_LAST_FREE_TID);
1648 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1649 MVPP2_PRS_IPV4_HEAD |
1650 MVPP2_PRS_IPV4_IHL_MIN,
1651 MVPP2_PRS_IPV4_HEAD_MASK |
1652 MVPP2_PRS_IPV4_IHL_MASK);
1654 /* Clear ri before updating */
1655 pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1656 pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1657 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1658 MVPP2_PRS_RI_L3_PROTO_MASK);
1660 /* Update shadow table and hw entry */
1661 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1662 mvpp2_prs_hw_write(priv, &pe);
1664 /* IPv6 over PPPoE */
1665 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1666 MVPP2_PE_LAST_FREE_TID);
1670 memset(&pe, 0, sizeof(pe));
1671 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1674 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
1676 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1677 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1678 MVPP2_PRS_RI_L3_PROTO_MASK);
1679 /* Jump to DIP of IPV6 header */
1680 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1681 MVPP2_MAX_L3_ADDR_SIZE,
1682 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1684 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1686 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1688 /* Update shadow table and hw entry */
1689 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1690 mvpp2_prs_hw_write(priv, &pe);
1692 /* Non-IP over PPPoE */
1693 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1694 MVPP2_PE_LAST_FREE_TID);
1698 memset(&pe, 0, sizeof(pe));
1699 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
/* No etype match: catch-all for any PPP protocol other than IP/IPv6 */
1702 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1703 MVPP2_PRS_RI_L3_PROTO_MASK);
1705 /* Finished: go to flowid generation */
1706 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1707 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1708 /* Set L3 offset even if it's unknown L3 */
1709 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1711 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1713 /* Update shadow table and hw entry */
1714 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1715 mvpp2_prs_hw_write(priv, &pe);
1720 /* Initialize entries for IPv4 */
/* NOTE(review): excerpt drops lines ('err' declaration, error-return branches
 * after each proto/cast call, braces) — verify against the complete file.
 */
1721 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
1723 struct mvpp2_prs_entry pe;
1726 /* Set entries for TCP, UDP and IGMP over IPv4 */
1727 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
1728 MVPP2_PRS_RI_L4_PROTO_MASK);
1732 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
1733 MVPP2_PRS_RI_L4_PROTO_MASK);
/* IGMP is trapped to CPU: RX-special CPU code + UDF3 instead of an L4 type */
1737 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
1738 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1739 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1740 MVPP2_PRS_RI_CPU_CODE_MASK |
1741 MVPP2_PRS_RI_UDF3_MASK);
1745 /* IPv4 Broadcast */
1746 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
1750 /* IPv4 Multicast */
1751 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1755 /* Default IPv4 entry for unknown protocols */
1756 memset(&pe, 0, sizeof(pe));
1757 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1758 pe.index = MVPP2_PE_IP4_PROTO_UN;
1760 /* Finished: go to flowid generation */
1761 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1762 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
/* L4 offset is 4 bytes back from current position (we sit past the DIP) */
1765 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
1766 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1767 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1768 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1769 MVPP2_PRS_RI_L4_PROTO_MASK);
/* Only matches on the second (DIP) pass, flagged by the DIP AI bit */
1771 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1772 MVPP2_PRS_IPV4_DIP_AI_BIT);
1773 /* Unmask all ports */
1774 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1776 /* Update shadow table and hw entry */
1777 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1778 mvpp2_prs_hw_write(priv, &pe);
1780 /* Default IPv4 entry for unicast address */
1781 memset(&pe, 0, sizeof(pe));
1782 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1783 pe.index = MVPP2_PE_IP4_ADDR_UN;
1785 /* Go again to ipv4 */
1786 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
/* Set the DIP AI bit so the second IP4 pass hits the proto entries above */
1788 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1789 MVPP2_PRS_IPV4_DIP_AI_BIT);
1791 /* Shift back to IPv4 proto */
1792 mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1794 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1795 MVPP2_PRS_RI_L3_ADDR_MASK);
1796 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1798 /* Unmask all ports */
1799 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1801 /* Update shadow table and hw entry */
1802 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1803 mvpp2_prs_hw_write(priv, &pe);
1808 /* Initialize entries for IPv6 */
/* NOTE(review): excerpt drops lines ('tid'/'err' declarations, error-return
 * branches, 'pe.index = tid;' after tcam_first_free, braces) — verify against
 * the complete file.
 */
1809 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
1811 struct mvpp2_prs_entry pe;
1814 /* Set entries for TCP, UDP and ICMP over IPv6 */
1815 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
1816 MVPP2_PRS_RI_L4_TCP,
1817 MVPP2_PRS_RI_L4_PROTO_MASK);
1821 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
1822 MVPP2_PRS_RI_L4_UDP,
1823 MVPP2_PRS_RI_L4_PROTO_MASK);
/* ICMPv6 is trapped to CPU: RX-special CPU code + UDF3 */
1827 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
1828 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1829 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1830 MVPP2_PRS_RI_CPU_CODE_MASK |
1831 MVPP2_PRS_RI_UDF3_MASK);
1835 /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
1836 /* Result Info: UDF7=1, DS lite */
1837 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
1838 MVPP2_PRS_RI_UDF7_IP6_LITE,
1839 MVPP2_PRS_RI_UDF7_MASK);
1843 /* IPv6 multicast */
1844 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1848 /* Entry for checking hop limit */
1849 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1850 MVPP2_PE_LAST_FREE_TID);
1854 memset(&pe, 0, sizeof(pe));
1855 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1858 /* Finished: go to flowid generation */
1859 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1860 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
/* Hop limit of 0: mark unknown L3 and drop */
1861 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
1862 MVPP2_PRS_RI_DROP_MASK,
1863 MVPP2_PRS_RI_L3_PROTO_MASK |
1864 MVPP2_PRS_RI_DROP_MASK);
1866 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
1867 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1868 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1870 /* Update shadow table and hw entry */
/* NOTE(review): shadow lu recorded as LU_IP4 for an IPv6 entry here and for
 * the two default entries below — looks inconsistent (the unicast default
 * below uses LU_IP6); confirm intent before changing.
 */
1871 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1872 mvpp2_prs_hw_write(priv, &pe);
1874 /* Default IPv6 entry for unknown protocols */
1875 memset(&pe, 0, sizeof(pe));
1876 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1877 pe.index = MVPP2_PE_IP6_PROTO_UN;
1879 /* Finished: go to flowid generation */
1880 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1881 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1882 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1883 MVPP2_PRS_RI_L4_PROTO_MASK);
1884 /* Set L4 offset relatively to our current place */
1885 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1886 sizeof(struct ipv6hdr) - 4,
1887 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1889 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1890 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1891 /* Unmask all ports */
1892 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1894 /* Update shadow table and hw entry */
1895 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1896 mvpp2_prs_hw_write(priv, &pe);
1898 /* Default IPv6 entry for unknown ext protocols */
1899 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1900 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1901 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
1903 /* Finished: go to flowid generation */
1904 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1905 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1906 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1907 MVPP2_PRS_RI_L4_PROTO_MASK);
/* Matches only the extension-header pass (EXT AI bit set) */
1909 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
1910 MVPP2_PRS_IPV6_EXT_AI_BIT);
1911 /* Unmask all ports */
1912 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1914 /* Update shadow table and hw entry */
1915 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1916 mvpp2_prs_hw_write(priv, &pe);
1918 /* Default IPv6 entry for unicast address */
1919 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1920 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1921 pe.index = MVPP2_PE_IP6_ADDR_UN;
1923 /* Finished: go to IPv6 again */
1924 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1925 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1926 MVPP2_PRS_RI_L3_ADDR_MASK);
1927 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1928 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1929 /* Shift back to IPV6 NH */
1930 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1932 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1933 /* Unmask all ports */
1934 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1936 /* Update shadow table and hw entry */
1937 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1938 mvpp2_prs_hw_write(priv, &pe);
1943 /* Find tcam entry with matched pair <vid,port> */
/* NOTE(review): the function's tail (continue/return statements, 'tid'/'rvid'/
 * 'rmask' declarations) is missing from this excerpt — verify vs full file.
 */
1944 static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1946 unsigned char byte[2], enable[2];
1947 struct mvpp2_prs_entry pe;
1951 /* Go through the all entries with MVPP2_PRS_LU_VID */
1952 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1953 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1954 if (!port->priv->prs_shadow[tid].valid ||
1955 port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1958 mvpp2_prs_init_from_hw(port->priv, &pe, tid);
/* VID lives in TCAM data bytes 2-3: low nibble of byte 2 + all of byte 3 */
1960 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1961 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1963 rvid = ((byte[0] & 0xf) << 8) + byte[1];
1964 rmask = ((enable[0] & 0xf) << 8) + enable[1];
1966 if (rvid != vid || rmask != mask)
1975 /* Write parser entry for VID filtering */
/* NOTE(review): excerpt drops lines ('tid' declaration, the branch structure
 * around the range-find result, 'pe.index = tid;', error returns, braces) —
 * verify against the complete file.
 */
1976 int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
1978 unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
1979 port->id * MVPP2_PRS_VLAN_FILT_MAX;
1980 unsigned int mask = 0xfff, reg_val, shift;
1981 struct mvpp2 *priv = port->priv;
1982 struct mvpp2_prs_entry pe;
1985 memset(&pe, 0, sizeof(pe));
1987 /* Scan TCAM and see if entry with this <vid,port> already exist */
1988 tid = mvpp2_prs_vid_range_find(port, vid, mask);
/* Shift depends on tag length: 8 bytes for extended DSA, else 4 */
1990 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
1991 if (reg_val & MVPP2_DSA_EXTENDED)
1992 shift = MVPP2_VLAN_TAG_EDSA_LEN;
1994 shift = MVPP2_VLAN_TAG_LEN;
1999 /* Go through all entries from first to last in vlan range */
2000 tid = mvpp2_prs_tcam_first_free(priv, vid_start,
2002 MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
2004 /* There isn't room for a new VID filter */
2008 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2011 /* Mask all ports */
2012 mvpp2_prs_tcam_port_map_set(&pe, 0);
/* For an existing entry, reload it from hw before updating the port map */
2014 mvpp2_prs_init_from_hw(priv, &pe, tid);
2017 /* Enable the current port */
2018 mvpp2_prs_tcam_port_set(&pe, port->id, true);
2020 /* Continue - set next lookup */
2021 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2023 /* Skip VLAN header - Set offset to 4 or 8 bytes */
2024 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2026 /* Set match on VID */
2027 mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
2029 /* Clear all ai bits for next iteration */
2030 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2032 /* Update shadow table */
2033 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2034 mvpp2_prs_hw_write(priv, &pe);
2039 /* Write parser entry for VID filtering */
/* NOTE(review): comment above looks copy-pasted from the add path — this
 * function REMOVES a VID entry. Excerpt also drops the 'tid' declaration and
 * the not-found early return; verify vs full file.
 */
2040 void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2042 struct mvpp2 *priv = port->priv;
2045 /* Scan TCAM and see if entry with this <vid,port> already exist */
2046 tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
/* Invalidate the hw entry and mark its shadow slot free */
2052 mvpp2_prs_hw_inv(priv, tid);
2053 priv->prs_shadow[tid].valid = false;
2056 /* Remove all existing VID filters on this port */
/* NOTE(review): excerpt drops the 'tid' declaration and closing braces. */
2057 void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2059 struct mvpp2 *priv = port->priv;
/* Walk this port's private VID-filter TCAM range and invalidate every
 * entry still marked valid in the shadow table.
 */
2062 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2063 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2064 if (priv->prs_shadow[tid].valid) {
2065 mvpp2_prs_hw_inv(priv, tid);
2066 priv->prs_shadow[tid].valid = false;
2071 /* Remove VID filering entry for this port */
/* NOTE(review): "filering" typo in original comment; also excerpt drops
 * braces/blank lines.
 */
2072 void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2074 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2075 struct mvpp2_prs_entry *priv = port->priv;
2077 /* Invalidate the guard entry */
2078 mvpp2_prs_hw_inv(priv, tid);
2080 priv->prs_shadow[tid].valid = false;
2083 /* Add guard entry that drops packets when no VID is matched on this port */
/* NOTE(review): excerpt drops lines (early-return body, 'pe.index = tid;',
 * braces) — verify against the complete file.
 */
2084 void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2086 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2087 struct mvpp2 *priv = port->priv;
2088 unsigned int reg_val, shift;
2089 struct mvpp2_prs_entry pe;
/* Guard entry already present — nothing to do */
2091 if (priv->prs_shadow[tid].valid)
2094 memset(&pe, 0, sizeof(pe));
/* Shift depends on tag length: 8 bytes for extended DSA, else 4 */
2098 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2099 if (reg_val & MVPP2_DSA_EXTENDED)
2100 shift = MVPP2_VLAN_TAG_EDSA_LEN;
2102 shift = MVPP2_VLAN_TAG_LEN;
2104 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2106 /* Mask all ports */
2107 mvpp2_prs_tcam_port_map_set(&pe, 0);
2109 /* Update port mask */
2110 mvpp2_prs_tcam_port_set(&pe, port->id, true);
2112 /* Continue - set next lookup */
2113 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2115 /* Skip VLAN header - Set offset to 4 or 8 bytes */
2116 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2118 /* Drop VLAN packets that don't belong to any VIDs on this port */
2119 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2120 MVPP2_PRS_RI_DROP_MASK);
2122 /* Clear all ai bits for next iteration */
2123 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2125 /* Update shadow table */
2126 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2127 mvpp2_prs_hw_write(priv, &pe);
2130 /* Parser default initialization */
/* NOTE(review): excerpt drops lines ('index'/'i'/'err' declarations, the
 * 'if (err)' returns after each sub-init, final 'return 0;', braces) —
 * verify against the complete file.
 */
2131 int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
2135 /* Enable tcam table */
2136 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2138 /* Clear all tcam and sram entries */
2139 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2140 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2141 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2142 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2144 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2145 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2146 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2149 /* Invalidate all tcam entries */
2150 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2151 mvpp2_prs_hw_inv(priv, index);
/* Shadow table mirrors the hw TCAM state for software lookups */
2153 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2154 sizeof(*priv->prs_shadow),
2156 if (!priv->prs_shadow)
2159 /* Always start from lookup = 0 */
2160 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2161 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2162 MVPP2_PRS_PORT_LU_MAX, 0);
2164 mvpp2_prs_def_flow_init(priv);
2166 mvpp2_prs_mh_init(priv);
2168 mvpp2_prs_mac_init(priv);
2170 mvpp2_prs_dsa_init(priv);
2172 mvpp2_prs_vid_init(priv);
2174 err = mvpp2_prs_etype_init(priv);
2178 err = mvpp2_prs_vlan_init(pdev, priv);
2182 err = mvpp2_prs_pppoe_init(priv);
2186 err = mvpp2_prs_ip6_init(priv);
2190 err = mvpp2_prs_ip4_init(priv);
2197 /* Compare MAC DA with tcam entry data */
/* Returns true only if every byte's enable mask equals the caller's mask AND
 * the enabled bits of the TCAM byte match the masked DA byte.
 * NOTE(review): excerpt drops the 'return' lines and braces — verify vs
 * full file.
 */
2198 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2199 const u8 *da, unsigned char *mask)
2201 unsigned char tcam_byte, tcam_mask;
2204 for (index = 0; index < ETH_ALEN; index++) {
2205 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2206 if (tcam_mask != mask[index])
2209 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2216 /* Find tcam entry with matched pair <MAC DA, port> */
/* NOTE(review): excerpt drops the return type line, 'tid' declaration, the
 * pmap-check condition tail and return statements — verify vs full file.
 * Original comment typo: "entires".
 */
2218 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2219 unsigned char *mask, int udf_type)
2221 struct mvpp2_prs_entry pe;
2224 /* Go through the all entires with MVPP2_PRS_LU_MAC */
2225 for (tid = MVPP2_PE_MAC_RANGE_START;
2226 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2227 unsigned int entry_pmap;
/* Skip slots that are free, not MAC-lookup, or of a different udf kind */
2229 if (!priv->prs_shadow[tid].valid ||
2230 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2231 (priv->prs_shadow[tid].udf != udf_type))
2234 mvpp2_prs_init_from_hw(priv, &pe, tid);
2235 entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2237 if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2245 /* Update parser's mac da entry */
/* NOTE(review): excerpt drops lines ('tid'/'err' declarations, the add/remove
 * branch structure around the range-find result, the 'for (len = ...)' loop
 * header before the data_byte_set call, error returns, braces) — verify
 * against the complete file before editing.
 */
2246 int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
2248 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2249 struct mvpp2 *priv = port->priv;
2250 unsigned int pmap, len, ri;
2251 struct mvpp2_prs_entry pe;
2254 memset(&pe, 0, sizeof(pe));
2256 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */
2257 tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
2258 MVPP2_PRS_UDF_MAC_DEF);
2265 /* Create new TCAM entry */
2266 /* Go through the all entries from first to last */
2267 tid = mvpp2_prs_tcam_first_free(priv,
2268 MVPP2_PE_MAC_RANGE_START,
2269 MVPP2_PE_MAC_RANGE_END);
2275 /* Mask all ports */
2276 mvpp2_prs_tcam_port_map_set(&pe, 0);
/* For an existing entry, reload its current hw state before modifying */
2278 mvpp2_prs_init_from_hw(priv, &pe, tid);
2281 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2283 /* Update port mask */
2284 mvpp2_prs_tcam_port_set(&pe, port->id, add);
2286 /* Invalidate the entry if no ports are left enabled */
2287 pmap = mvpp2_prs_tcam_port_map_get(&pe);
2292 mvpp2_prs_hw_inv(priv, pe.index);
2293 priv->prs_shadow[pe.index].valid = false;
2297 /* Continue - set next lookup */
2298 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2300 /* Set match on DA */
2303 mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
2305 /* Set result info bits */
/* Classify DA as broadcast / multicast / unicast; flag "MAC me" when the
 * unicast address equals the port's own address.
 */
2306 if (is_broadcast_ether_addr(da)) {
2307 ri = MVPP2_PRS_RI_L2_BCAST;
2308 } else if (is_multicast_ether_addr(da)) {
2309 ri = MVPP2_PRS_RI_L2_MCAST;
2311 ri = MVPP2_PRS_RI_L2_UCAST;
2313 if (ether_addr_equal(da, port->dev->dev_addr))
2314 ri |= MVPP2_PRS_RI_MAC_ME_MASK;
2317 mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2318 MVPP2_PRS_RI_MAC_ME_MASK);
2319 mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2320 MVPP2_PRS_RI_MAC_ME_MASK);
2322 /* Shift to ethertype */
2323 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2324 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2326 /* Update shadow table and hw entry */
2327 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
2328 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2329 mvpp2_prs_hw_write(priv, &pe);
2334 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2336 struct mvpp2_port *port = netdev_priv(dev);
2339 /* Remove old parser entry */
2340 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2344 /* Add new parser entry */
2345 err = mvpp2_prs_mac_da_accept(port, da, true);
2349 /* Set addr in the device */
2350 ether_addr_copy(dev->dev_addr, da);
2355 void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
2357 struct mvpp2 *priv = port->priv;
2358 struct mvpp2_prs_entry pe;
2362 for (tid = MVPP2_PE_MAC_RANGE_START;
2363 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2364 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
2366 if (!priv->prs_shadow[tid].valid ||
2367 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2368 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
2371 mvpp2_prs_init_from_hw(priv, &pe, tid);
2373 pmap = mvpp2_prs_tcam_port_map_get(&pe);
2375 /* We only want entries active on this port */
2376 if (!test_bit(port->id, &pmap))
2379 /* Read mac addr from entry */
2380 for (index = 0; index < ETH_ALEN; index++)
2381 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
2384 /* Special cases : Don't remove broadcast and port's own
2387 if (is_broadcast_ether_addr(da) ||
2388 ether_addr_equal(da, port->dev->dev_addr))
2391 /* Remove entry from TCAM */
2392 mvpp2_prs_mac_da_accept(port, da, false);
2396 int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2399 case MVPP2_TAG_TYPE_EDSA:
2400 /* Add port to EDSA entries */
2401 mvpp2_prs_dsa_tag_set(priv, port, true,
2402 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2403 mvpp2_prs_dsa_tag_set(priv, port, true,
2404 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2405 /* Remove port from DSA entries */
2406 mvpp2_prs_dsa_tag_set(priv, port, false,
2407 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2408 mvpp2_prs_dsa_tag_set(priv, port, false,
2409 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2412 case MVPP2_TAG_TYPE_DSA:
2413 /* Add port to DSA entries */
2414 mvpp2_prs_dsa_tag_set(priv, port, true,
2415 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2416 mvpp2_prs_dsa_tag_set(priv, port, true,
2417 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2418 /* Remove port from EDSA entries */
2419 mvpp2_prs_dsa_tag_set(priv, port, false,
2420 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2421 mvpp2_prs_dsa_tag_set(priv, port, false,
2422 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2425 case MVPP2_TAG_TYPE_MH:
2426 case MVPP2_TAG_TYPE_NONE:
2427 /* Remove port form EDSA and DSA entries */
2428 mvpp2_prs_dsa_tag_set(priv, port, false,
2429 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2430 mvpp2_prs_dsa_tag_set(priv, port, false,
2431 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2432 mvpp2_prs_dsa_tag_set(priv, port, false,
2433 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2434 mvpp2_prs_dsa_tag_set(priv, port, false,
2435 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2439 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
2446 int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2448 struct mvpp2_prs_entry pe;
2449 u8 *ri_byte, *ri_byte_mask;
2452 memset(&pe, 0, sizeof(pe));
2454 tid = mvpp2_prs_tcam_first_free(priv,
2455 MVPP2_PE_LAST_FREE_TID,
2456 MVPP2_PE_FIRST_FREE_TID);
2462 ri_byte = (u8 *)&ri;
2463 ri_byte_mask = (u8 *)&ri_mask;
2465 mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2466 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2468 for (i = 0; i < 4; i++) {
2469 mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2473 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2474 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2475 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2476 mvpp2_prs_hw_write(priv, &pe);
2481 /* Set prs flow for the port */
2482 int mvpp2_prs_def_flow(struct mvpp2_port *port)
2484 struct mvpp2_prs_entry pe;
2487 memset(&pe, 0, sizeof(pe));
2489 tid = mvpp2_prs_flow_find(port->priv, port->id);
2491 /* Such entry not exist */
2493 /* Go through the all entires from last to first */
2494 tid = mvpp2_prs_tcam_first_free(port->priv,
2495 MVPP2_PE_LAST_FREE_TID,
2496 MVPP2_PE_FIRST_FREE_TID);
2503 mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2504 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2506 /* Update shadow table */
2507 mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2509 mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2512 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2513 mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
2514 mvpp2_prs_hw_write(port->priv, &pe);
2519 int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2523 if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
2526 mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2528 val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2530 val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;