1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2022 Marvell.
8 #include <linux/bitfield.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/firmware.h>
12 #include <linux/stddef.h>
13 #include <linux/debugfs.h>
15 #include "rvu_struct.h"
20 #include "rvu_npc_fs.h"
21 #include "rvu_npc_hash.h"
/* Extract @width_bits bits starting at bit @start_bit from the bit array
 * @input (array of 64-bit words, LSB-first within each word).  The field
 * spans at most two adjacent words.
 */
static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
				size_t width_bits)
{
	const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
	const size_t msb = start_bit + width_bits - 1;
	const size_t lword = start_bit >> 6;
	const size_t uword = msb >> 6;
	u64 low, high;
	size_t low_cnt;

	/* Whole field fits inside a single 64-bit word */
	if (lword == uword)
		return (input[lword] >> (start_bit & 63)) & mask;

	/* Field straddles two words: stitch the two halves together */
	low_cnt = 64 - (start_bit & 63);
	low = input[lword] >> (start_bit & 63);
	high = input[uword];
	return ((high << low_cnt) | low) & mask;
}
/* Shift the @key_bit_len-bit key stored in @key (64-bit words, least
 * significant word first) left by one bit, carrying the top bit of each
 * word into the next higher word.
 */
static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
{
	size_t nwords = key_bit_len / 64;
	u64 carry_src = 0;
	u64 saved;
	size_t w;

	/* A partial trailing word still needs shifting */
	if (key_bit_len % 64)
		nwords++;

	for (w = 0; w < nwords; w++) {
		saved = key[w];
		key[w] = key[w] << 1;
		/* Carry in bit 63 of the previous (lower) word */
		key[w] |= ((carry_src >> 63) & 0x1);
		carry_src = saved;
	}
}
/* Toeplitz hash of @data_bit_len bits of @data using the @key_bit_len-bit
 * key held in @key.  NOTE: @key is consumed - it is shifted left one bit
 * per data bit processed, so the caller must not reuse it afterwards.
 */
static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
				 size_t key_bit_len)
{
	u32 hash = 0;
	u64 bit;
	int pos;

	/* Walk data MSB->LSB: for every set data bit XOR in the current
	 * top 32 bits of the key, then rotate the key left by one.
	 */
	for (pos = data_bit_len - 1; pos >= 0; pos--) {
		bit = data[pos / 64];
		bit = bit >> (pos % 64);
		bit &= 0x1;
		if (bit)
			hash ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));

		rvu_npc_lshift_key(key, key_bit_len);
	}

	return hash;
}
/**
 * npc_field_hash_calc - Compute the 32-bit field hash of 128 bits of data.
 * @ldata: Two 64-bit words of extracted packet data.
 * @rsp: Field hash info (secret key, per-interface hash masks and control).
 * @intf: Interface index (RX/TX).
 * @hash_idx: Hash extractor index.
 *
 * Packs the three secret-key words into a 159-bit Toeplitz key, masks the
 * input with the configured hash mask, then applies the hash_ctrl AND-mask
 * and additive offset to the resulting hash.
 */
u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
			u8 intf, int hash_idx)
	/* Repack secret_key[2..0] into a contiguous key: 31/33-bit pieces
	 * are stitched across the three 64-bit words.
	 */
	hash_key[0] = rsp.secret_key[1] << 31;
	hash_key[0] |= rsp.secret_key[2];
	hash_key[1] = rsp.secret_key[1] >> 33;
	hash_key[1] |= rsp.secret_key[0] << 31;
	hash_key[2] = rsp.secret_key[0] >> 33;

	/* Only hash the bits selected by the per-interface hash mask */
	data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
	data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);

	/* hash_ctrl: upper 32 bits = AND mask, lower 32 bits = added offset */
	field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
	field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
/* Read the KEX LD config for (intf, lid, lt, ld) and rebuild it with
 * use_hash set and bytesm1 forced to 0x3, so the extracted field is
 * replaced by its 32-bit hash in the MCAM key.  Returns the updated
 * KEX_LD_CFG value.
 */
static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
			       u8 intf, int lid, int lt, int ld)
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
	/* Preserve the existing header and key byte offsets */
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);

	/* Update use_hash(bit-20) to 'true' and
	 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
	 */
	cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
/* Program hash extraction for all hash-enabled (lid, lt, ld) tuples on an
 * RX interface: switch each KEX LD config to hashed mode and program the
 * hash function, masks and control from the mkex_hash profile.  At most
 * NPC_MAX_HASH extractors are programmed per interface.
 */
static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	int lid, lt, ld, hash_cnt = 0;

	/* This helper only handles RX interfaces */
	if (is_npc_intf_tx(intf))

	/* Program HASH_CFG */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
					/* Hardware supports a limited number
					 * of hash extractors per interface.
					 */
					if (hash_cnt == NPC_MAX_HASH)

					cfg = npc_update_use_hash(rvu, blkaddr,
					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);
/* TX-side twin of npc_program_mkex_hash_rx(): program hash extraction for
 * all hash-enabled (lid, lt, ld) tuples on a TX interface, up to
 * NPC_MAX_HASH extractors.
 */
static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	int lid, lt, ld, hash_cnt = 0;

	/* This helper only handles TX interfaces */
	if (is_npc_intf_rx(intf))

	/* Program HASH_CFG */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
					/* Hardware supports a limited number
					 * of hash extractors per interface.
					 */
					if (hash_cnt == NPC_MAX_HASH)

					cfg = npc_update_use_hash(rvu, blkaddr,
					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);
/* Program the NPC field-hash secret key registers (KEY0/1/2) for every
 * interface.  No-op on silicon without hash-extraction support.
 */
void npc_config_secret_key(struct rvu *rvu, int blkaddr)
	struct hw_cap *hwcap = &rvu->hw->cap;
	struct rvu_hwinfo *hw = rvu->hw;

	/* Feature gate: hash-extract capable variants only */
	if (!hwcap->npc_hash_extract)

	for (intf = 0; intf < hw->npc_intfs; intf++) {
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
			    RVU_NPC_HASH_SECRET_KEY0);
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
			    RVU_NPC_HASH_SECRET_KEY1);
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
			    RVU_NPC_HASH_SECRET_KEY2);
/* Decide which KEX LD slots should use hashed IPv6 SIP/DIP extraction,
 * then program the hash configuration for every interface.
 */
void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
	struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
	struct hw_cap *hwcap = &rvu->hw->cap;
	u8 intf, ld, hdr_offset, byte_len;
	struct rvu_hwinfo *hw = rvu->hw;

	/* Check if hardware supports hash extraction */
	if (!hwcap->npc_hash_extract)

	/* Check if IPv6 source/destination address
	 * should be hash enabled.
	 * Hashing reduces 128bit SIP/DIP fields to 32bit
	 * so that 224 bit X2 key can be used for IPv6 based filters as well,
	 * which in turn results in more number of MCAM entries available for
	 * other flows.
	 *
	 * Hashing of IPV6 SIP/DIP is enabled in below scenarios
	 * 1. If the silicon variant supports hashing feature
	 * 2. If the number of bytes of IP addr being extracted is 4 bytes ie
	 *    32bit. The assumption here is that if user wants 8bytes of LSB of
	 *    IP addr or full 16 bytes then his intention is not to use 32bit
	 *    hashed value.
	 */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			cfg = rvu_read64(rvu, blkaddr,
					 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
			hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
			byte_len = FIELD_GET(NPC_BYTESM, cfg);
			/* Hashing of IPv6 source/destination address should be
			 * enabled if,
			 * hdr_offset == 8 (offset of source IPv6 address) or
			 * hdr_offset == 24 (offset of destination IPv6)
			 * address) and the number of byte to be
			 * extracted is 4. As per hardware configuration
			 * byte_len should be == actual byte_len - 1.
			 * Hence byte_len is checked against 3 but not 4.
			 */
			if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
				mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;

	/* Update hash configuration if the field is hash enabled */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
/* For hash-enabled fields (IPv6 SIP/DIP), replace the 128-bit address in
 * the MCAM entry with its 32-bit field hash; the original packet and mask
 * values are preserved in @opkt/@omask for later reporting/restoration.
 */
void npc_update_field_hash(struct rvu *rvu, u8 intf,
			   struct mcam_entry *entry,
			   struct flow_msg *pkt,
			   struct flow_msg *mask,
			   struct flow_msg *opkt,
			   struct flow_msg *omask)
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	struct npc_get_field_hash_info_req req;
	struct npc_get_field_hash_info_rsp rsp;

	if (!rvu->hw->cap.npc_hash_extract) {
		dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);

	/* Fetch secret key, masks and hash ctrl exactly as a PF/VF would */
	rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);

	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
		/* Bits 11 and 12 gate this extractor; decode lid/ltype fields */
		if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
			u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
			u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
			u8 ltype_mask = cfg & GENMASK_ULL(3, 0);

			if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
				switch (ltype & ltype_mask) {
				/* If hash extract enabled is supported for IPv6 then
				 * 128 bit IPv6 source and destination addressed
				 * is hashed to 32 bit value.
				 */
					/* ld[0] == hash_idx[0] == Source IPv6
					 * ld[1] == hash_idx[1] == Destination IPv6
					 */
					if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
						u32 src_ip[IPV6_WORDS];

						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
						/* ldata[1] holds the upper 64 bits
						 * of the address, ldata[0] the lower.
						 */
						ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
						ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
						field_hash = npc_field_hash_calc(ldata,
						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
								 GENMASK(31, 0), 0, intf);
						/* Keep the original address/mask */
						memcpy(&opkt->ip6src, &pkt->ip6src,
						       sizeof(pkt->ip6src));
						memcpy(&omask->ip6src, &mask->ip6src,
						       sizeof(mask->ip6src));
					} else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
						u32 dst_ip[IPV6_WORDS];

						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
						ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
						ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
						field_hash = npc_field_hash_calc(ldata,
						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
								 GENMASK(31, 0), 0, intf);
						/* Keep the original address/mask */
						memcpy(&opkt->ip6dst, &pkt->ip6dst,
						       sizeof(pkt->ip6dst));
						memcpy(&omask->ip6dst, &mask->ip6dst,
						       sizeof(mask->ip6dst));
/* Mbox handler: report the field-hash configuration to a PF/VF.  Fills the
 * response with the NPC secret key (from AF registers) plus the per-
 * interface hash masks and hash control words.
 */
int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
					     struct npc_get_field_hash_info_req *req,
					     struct npc_get_field_hash_info_rsp *rsp)
	u64 *secret_key = rsp->secret_key;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	/* NPC block must exist to read any hash configuration */
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);

	secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));

	/* Per-extractor hash masks for both RX and TX interfaces */
	for (i = 0; i < NPC_MAX_HASH; i++) {
		for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
			rsp->hash_mask[NIX_INTF_RX][i][j] =
				GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
			rsp->hash_mask[NIX_INTF_TX][i][j] =
				GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);

	/* Hash control word per interface and extractor */
	for (i = 0; i < NPC_MAX_INTF; i++)
		for (j = 0; j < NPC_MAX_HASH; j++)
			rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
/**
 * rvu_exact_prepare_mdata - Make mdata for mcam entry
 * @mac: MAC address (6 bytes, folded into bits 47:0).
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @mask: Final mask applied to the assembled mdata.
 * Return: Masked mdata word for the exact-match table.
 */
static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
	u64 ldata = ether_addr_to_u64(mac);

	/* Please note that mask is 48bit which excludes chan and ctype.
	 * Increase mask bits if we need to include them as well.
	 */
	ldata |= ((u64)chan << 48);
	ldata |= ((u64)ctype << 60);
/**
 * rvu_exact_calculate_hash - calculate hash index to mem table.
 * @rvu: resource virtualization unit.
 * @chan: Channel number
 * @ctype: Channel type.
 * @mac: Destination MAC address.
 * @mask: Mdata mask applied before hashing.
 * @table_depth: Depth of table.
 * Return: Hash (bucket index) into the 4-way mem table.
 */
static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
				    u64 mask, u32 table_depth)
	struct npc_exact_table *table = rvu->hw->table;

	/* Same secret key constants the hardware uses; only KEY0 and KEY2
	 * feed the 95-bit key below.
	 */
	key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
	key_in[1] = RVU_NPC_HASH_SECRET_KEY2;

	/* Pack the two 64-bit key words into the 95-bit Toeplitz key */
	hash_key[0] = key_in[0] << 31;
	hash_key[0] |= key_in[1];
	hash_key[1] = key_in[0] >> 33;

	ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);

	dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
		ldata, hash_key[1], hash_key[0]);
	hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);

	/* Reduce to table depth; hash_mask is depth-1 (depth is a power of 2) */
	hash &= table->mem_table.hash_mask;
	hash += table->mem_table.hash_offset;
	dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
/**
 * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
 * @rvu: resource virtualization unit.
 * @way: Indicate way to table.
 * @index: Hash index to 4 way table.
 * @hash: Hash bucket to search.
 *
 * Searches 4 way table using hash index. Returns 0 on success.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
					       u32 *index, unsigned int hash)
	struct npc_exact_table *table;

	table = rvu->hw->table;
	depth = table->mem_table.depth;

	/* Check all the 4 ways for a free slot. */
	mutex_lock(&table->lock);
	for (i = 0; i < table->mem_table.ways; i++) {
		/* Way i of bucket 'hash' lives at bit (hash + i * depth) */
		if (test_bit(hash + i * depth, table->mem_table.bmap))

		/* Claim the free slot while still holding the lock */
		set_bit(hash + i * depth, table->mem_table.bmap);
		mutex_unlock(&table->lock);

		dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",

	mutex_unlock(&table->lock);

	dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
		bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
/**
 * rvu_npc_exact_free_id - Free seq id from bitmap.
 * @rvu: Resource virtualization unit.
 * @seq_id: Sequence identifier to be freed.
 */
static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
	struct npc_exact_table *table;

	table = rvu->hw->table;
	/* Clearing the bit returns the id to the allocation pool */
	mutex_lock(&table->lock);
	clear_bit(seq_id, table->id_bmap);
	mutex_unlock(&table->lock);
	dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
/**
 * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
 * @rvu: Resource virtualization unit.
 * @seq_id: Sequence identifier (out param).
 * Return: True or false.
 */
static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
	struct npc_exact_table *table;

	table = rvu->hw->table;

	mutex_lock(&table->lock);
	idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
	/* find_first_zero_bit() returns the size when no zero bit exists */
	if (idx == table->tot_ids) {
		mutex_unlock(&table->lock);
		dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
			__func__, table->tot_ids);

	/* Mark bit map to indicate that slot is used.*/
	set_bit(idx, table->id_bmap);
	mutex_unlock(&table->lock);

	dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
/**
 * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
 * @rvu: resource virtualization unit.
 * @index: Index to exact CAM table (out param).
 * Return: 0 upon success; else error number.
 */
static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
	struct npc_exact_table *table;

	table = rvu->hw->table;

	mutex_lock(&table->lock);
	idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
	/* find_first_zero_bit() returns depth when the CAM table is full */
	if (idx == table->cam_table.depth) {
		mutex_unlock(&table->lock);
		dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
			 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));

	/* Mark bit map to indicate that slot is used.*/
	set_bit(idx, table->cam_table.bmap);
	mutex_unlock(&table->lock);

	dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
/**
 * rvu_exact_prepare_table_entry - Data for exact match table entry.
 * @rvu: Resource virtualization unit.
 * @enable: Enable/Disable entry
 * @ctype: Software defined channel type. Currently set as 0.
 * @chan: Channel number.
 * @mac_addr: Destination mac address.
 * Return: mdata for exact match table.
 */
static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
					 u8 ctype, u16 chan, u8 *mac_addr)
	u64 ldata = ether_addr_to_u64(mac_addr);

	/* Enable or disable */
	u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);

	/* Channel type goes in bits 61:60 */
	mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);

	/* Channel number goes in bits 59:48 */
	mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);

	/* 48-bit destination MAC occupies bits 47:0 */
	mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
/**
 * rvu_exact_config_secret_key - Configure secret key.
 * @rvu: Resource virtualization unit.
 */
static void rvu_exact_config_secret_key(struct rvu *rvu)
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	/* Program the same key constants used by the s/w hash (RX only) */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
		    RVU_NPC_HASH_SECRET_KEY0);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
		    RVU_NPC_HASH_SECRET_KEY1);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
		    RVU_NPC_HASH_SECRET_KEY2);
/**
 * rvu_exact_config_search_key - Configure search key
 * @rvu: Resource virtualization unit.
 *
 * Builds the RX exact-match search-key config: extract ETH_ALEN bytes
 * from layer NPC_LID_LA with no layer-type based matching.
 */
static void rvu_exact_config_search_key(struct rvu *rvu)
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Header offset field (bits 39:32) cleared */
	reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);

	/* BYTESM1, number of bytes - 1 */
	reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);

	/* Enable LID and set LID to NPC_LID_LA */
	reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
	reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);

	/* Clear layer type based extraction */

	reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);

	/* Set LTYPE_MATCH to 0 */
	reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);

	/* Set LTYPE_MASK to 0 */
	reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
/**
 * rvu_exact_config_result_ctrl - Set exact table hash control
 * @rvu: Resource virtualization unit.
 * @depth: Depth of Exact match table.
 *
 * Sets mask and offset for hash for mem table.
 */
static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Store hash mask and offset for s/w algorithm too, so the driver
	 * hash matches what hardware computes.
	 */
	/* Set mask. Note that depth is a power of 2 */
	rvu->hw->table->mem_table.hash_mask = (depth - 1);
	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));

	/* Set offset as 0 */
	rvu->hw->table->mem_table.hash_offset = 0;
	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);

	/* Program mask/offset into hardware for the RX interface */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
/**
 * rvu_exact_config_table_mask - Set exact table mask.
 * @rvu: Resource virtualization unit.
 */
static void rvu_exact_config_table_mask(struct rvu *rvu)
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Don't use Ctype */
	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);

	/* Include the channel bits (59:48) in the match */
	mask |= GENMASK_ULL(59, 48);

	/* Include the full 48-bit DMAC in the match */
	mask |= GENMASK_ULL(47, 0);

	/* Store mask for s/w hash calculation */
	rvu->hw->table->mem_table.mask = mask;

	/* Set mask for RX.*/
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
717 * rvu_npc_exact_get_max_entries - Get total number of entries in table.
718 * @rvu: resource virtualization unit.
719 * Return: Maximum table entries possible.
721 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
723 struct npc_exact_table *table;
725 table = rvu->hw->table;
726 return table->tot_ids;
730 * rvu_npc_exact_has_match_table - Checks support for exact match.
731 * @rvu: resource virtualization unit.
732 * Return: True if exact match table is supported/enabled.
734 bool rvu_npc_exact_has_match_table(struct rvu *rvu)
736 return rvu->hw->cap.npc_exact_match_enabled;
/**
 * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
 * @rvu: resource virtualization unit.
 * @seq_id: Sequence identifier.
 *
 * Caller should acquire the lock.
 * Return: Pointer to table entry.
 */
static struct npc_exact_table_entry *
__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
	struct npc_exact_table *table = rvu->hw->table;
	struct npc_exact_table_entry *entry = NULL;
	struct list_head *lhead;

	/* All entries, MEM and CAM alike, are linked on the global list */
	lhead = &table->lhead_gbl;

	/* traverse to find the matching entry */
	list_for_each_entry(entry, lhead, glist) {
		if (entry->seq_id != seq_id)
/**
 * rvu_npc_exact_add_to_list - Add entry to list
 * @rvu: resource virtualization unit.
 * @opc_type: OPCODE to select MEM/CAM table.
 * @ways: MEM table ways.
 * @index: Index in MEM/CAM table.
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @mac_addr: MAC address.
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @seq_id: Sequence identifier
 * @cmd: True if function is called by ethtool cmd
 * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
 * @pcifunc: pci function
 * Return: 0 upon success.
 */
static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
				     u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
				     u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
	struct npc_exact_table_entry *entry, *tmp, *iter;
	struct npc_exact_table *table = rvu->hw->table;
	struct list_head *lhead, *pprev;

	WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);

	/* Allocate a unique sequence id for this entry first */
	if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
		dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	/* On allocation failure the seq id must go back to the pool */
		rvu_npc_exact_free_id(rvu, *seq_id);
		dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);

	mutex_lock(&table->lock);
	/* Pick the per-table list and bump the matching entry counter */
	case NPC_EXACT_OPC_CAM:
		lhead = &table->lhead_cam_tbl_entry;
		table->cam_tbl_entry_cnt++;

	case NPC_EXACT_OPC_MEM:
		lhead = &table->lhead_mem_tbl_entry[ways];
		table->mem_tbl_entry_cnt++;

	/* Unknown opcode: undo the id allocation and bail out */
		mutex_unlock(&table->lock);

		rvu_npc_exact_free_id(rvu, *seq_id);

		dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);

	/* Add to global list */
	INIT_LIST_HEAD(&entry->glist);
	list_add_tail(&entry->glist, &table->lhead_gbl);
	INIT_LIST_HEAD(&entry->list);
	entry->index = index;

	entry->opc_type = opc_type;

	entry->pcifunc = pcifunc;

	ether_addr_copy(entry->mac, mac_addr);

	entry->ctype = ctype;
	entry->cgx_id = cgx_id;
	entry->lmac_id = lmac_id;

	entry->seq_id = *seq_id;

	entry->mcam_idx = mcam_idx;

	/* Insert entry in ascending order of index */
	list_for_each_entry_safe(iter, tmp, lhead, list) {
		if (index < iter->index)

	/* Add to each table list */
	list_add(&entry->list, pprev);
	mutex_unlock(&table->lock);
/**
 * rvu_npc_exact_mem_table_write - Wrapper for register write
 * @rvu: resource virtualization unit.
 * @blkaddr: Block address
 * @ways: ways for MEM table.
 * @index: Index in MEM
 * @mdata: Meta data to be written to register.
 */
static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
					  u32 index, u64 mdata)
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
/**
 * rvu_npc_exact_cam_table_write - Wrapper for register write
 * @rvu: resource virtualization unit.
 * @blkaddr: Block address
 * @index: Index in CAM table.
 * @mdata: Meta data to be written to register.
 */
static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
					  u32 index, u64 mdata)
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
/**
 * rvu_npc_exact_dealloc_table_entry - dealloc table entry
 * @rvu: resource virtualization unit.
 * @opc_type: OPCODE for selection of table(MEM or CAM)
 * @ways: ways if opc_type is MEM table.
 * @index: Index of MEM or CAM table.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_exact_table *table;
	u8 null_dmac[6] = { 0 };

	/* Prepare entry with all fields set to zero */
	u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);

	table = rvu->hw->table;
	depth = table->mem_table.depth;

	mutex_lock(&table->lock);

	case NPC_EXACT_OPC_CAM:

		/* Check whether entry is used already */
		if (!test_bit(index, table->cam_table.bmap)) {
			mutex_unlock(&table->lock);
			dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
				__func__, ways, index);

		/* Clear the HW entry, then release the bitmap slot */
		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
		clear_bit(index, table->cam_table.bmap);

	case NPC_EXACT_OPC_MEM:

		/* Check whether entry is used already */
		if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
			mutex_unlock(&table->lock);
			dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",

		/* Clear the HW entry, then release the bitmap slot */
		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
		clear_bit(index + ways * depth, table->mem_table.bmap);

	/* Unknown opcode */
		mutex_unlock(&table->lock);
		dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);

	mutex_unlock(&table->lock);

	dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
		__func__, index, ways, opc_type);
/**
 * rvu_npc_exact_alloc_table_entry - Allocate an entry
 * @rvu: resource virtualization unit.
 * @mac: MAC address.
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @index: Index of MEM table or CAM table.
 * @ways: Ways. Only valid for MEM table.
 * @opc_type: OPCODE to select table (MEM or CAM)
 *
 * Try allocating a slot from MEM table. If all 4 ways
 * slot are full for a hash index, check availability in
 * 32-entry CAM table for allocation.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
	struct npc_exact_table *table;

	table = rvu->hw->table;

	/* Check in 4-ways mem entry for free slot */
	hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
					table->mem_table.depth);
	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
		*opc_type = NPC_EXACT_OPC_MEM;
		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
			__func__, *ways, *index);

	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);

	/* ways is 0 for cam table */

	/* Fall back to the fully associative CAM table */
	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
		*opc_type = NPC_EXACT_OPC_CAM;
		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",

	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
/**
 * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
 * @rvu: resource virtualization unit.
 * @drop_mcam_idx: Drop rule index in NPC mcam.
 * @chan_val: Channel value.
 * @chan_mask: Channel Mask.
 * @pcifunc: pcifunc of interface.
 * Return: True upon success.
 */
static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
						       u64 chan_val, u64 chan_mask, u16 pcifunc)
	struct npc_exact_table *table;

	table = rvu->hw->table;

	/* Walk the drop-rule map looking for a usable slot */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)

		if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)

	/* All slots scanned without finding one to use */
	if (i == NPC_MCAM_DROP_RULE_MAX)

	table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
	table->drop_rule_map[i].chan_val = (u16)chan_val;
	table->drop_rule_map[i].chan_mask = (u16)chan_mask;
	table->drop_rule_map[i].pcifunc = pcifunc;
	table->drop_rule_map[i].valid = true;
/**
 * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
 * @rvu: resource virtualization unit.
 * @intf_type: Interface type (SDP, LBK or CGX)
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @val: Channel number (out param).
 * @mask: Channel mask (out param).
 * Return: True upon success.
 */
static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
						       u8 cgx_id, u8 lmac_id,
						       u64 *val, u64 *mask)
	u16 chan_val, chan_mask;

	/* No support for SDP and LBK */
	if (intf_type != NIX_INTF_TYPE_CGX)

	/* Base channel for this CGX/LMAC pair */
	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
/**
 * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
 * @rvu: resource virtualization unit.
 * @drop_rule_idx: Drop rule index in NPC mcam.
 *
 * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
 * by retrieving the pcifunc value from data base.
 * Return: pcifunc of the interface owning the drop rule.
 */
u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
	struct npc_exact_table *table;

	table = rvu->hw->table;

	/* Linear scan of the saved drop-rule map */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)

		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)

		return table->drop_rule_map[i].pcifunc;

	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
		__func__, drop_rule_idx);
/**
 * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
 * @rvu: resource virtualization unit.
 * @intf_type: Interface type (CGX, SDP or LBK)
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @drop_mcam_idx: NPC mcam drop rule index.
 * @val: Channel value.
 * @mask: Channel mask.
 * @pcifunc: pcifunc of interface corresponding to the drop rule.
 * Return: True upon success.
 */
static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
					     u64 *mask, u16 *pcifunc)
	struct npc_exact_table *table;
	u64 chan_val, chan_mask;

	table = rvu->hw->table;

	/* Only CGX interfaces have drop rules recorded */
	if (intf_type != NIX_INTF_TYPE_CGX) {
		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);

	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
							lmac_id, &chan_val, &chan_mask);

	/* Match the saved drop rule by channel value */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)

		/* NOTE(review): callers pass NULL for out-params they do not
		 * need (see the NULL, NULL, NULL call sites); the NULL guards
		 * appear to be elided in this view - confirm.
		 */
		*val = table->drop_rule_map[i].chan_val;

		*mask = table->drop_rule_map[i].chan_mask;

		*pcifunc = table->drop_rule_map[i].pcifunc;

	if (i == NPC_MCAM_DROP_RULE_MAX) {
		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
			__func__, *drop_mcam_idx);

	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
		__func__, cgx_id, lmac_id);
/**
 * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
 * @rvu: resource virtualization unit.
 * @drop_mcam_idx: NPC mcam drop rule index.
 * @val: Delta to apply to the rule counter (positive on add, negative on delete).
 * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
 *
 * when first exact match entry against a drop rule is added, enable_or_disable_cam
 * is set to true. When last exact match entry against a drop rule is deleted,
 * enable_or_disable_cam is set to true.
 * Return: Number of rules
 */
static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
						int val, bool *enable_or_disable_cam)
	struct npc_exact_table *table;

	table = rvu->hw->table;
	promisc = table->promisc_mode[drop_mcam_idx];

	cnt = &table->cnt_cmd_rules[drop_mcam_idx];

	/* Caller may pass NULL when it does not care about cam toggling */
	if (!enable_or_disable_cam)

	*enable_or_disable_cam = false;

	/* If all rules are deleted and not already in promisc mode;
	 * disable cam
	 */
	if (!*cnt && val < 0) {
		*enable_or_disable_cam = true;

	/* If rule got added and not already in promisc mode; enable cam */
	if (!old_cnt && val > 0) {
		*enable_or_disable_cam = true;
/**
 * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
 * @rvu: resource virtualization unit.
 * @seq_id: Sequence identifier of the entry.
 *
 * Deletes entry from linked lists and free up slot in HW MEM or CAM
 * table.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
	struct npc_exact_table_entry *entry = NULL;
	struct npc_exact_table *table;
	bool disable_cam = false;
	u32 drop_mcam_idx = -1;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
	/* Unknown id: nothing to delete */
		dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
		mutex_unlock(&table->lock);

	/* Pick the counter matching the table this entry lives in */
	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
						       &table->mem_tbl_entry_cnt;

	/* delete from lists */
	list_del_init(&entry->list);
	list_del_init(&entry->glist);

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
					      entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
		dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
		mutex_unlock(&table->lock);

	/* Decrement rule count for the drop rule; may request cam disable */
	__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);

	/* No dmac filter rules; disable drop on hit rule */
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
			__func__, drop_mcam_idx);

	mutex_unlock(&table->lock);

	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);

	rvu_npc_exact_free_id(rvu, seq_id);

	/* NOTE(review): if 'entry' is kfree()d in elided code before this
	 * point, dereferencing entry->mac below is a use-after-free -
	 * confirm the free happens after this dev_dbg.
	 */
	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
		__func__, seq_id, entry->mac);
1300 * rvu_npc_exact_add_table_entry - Adds a table entry
1301 * @rvu: resource virtualization unit.
1302 * @cgx_id: cgx identifier.
1303 * @lmac_id: lmac identifier.
1304 * @mac: MAC address.
1305 * @chan: Channel number.
1306 * @ctype: Channel Type.
1307 * @seq_id: Sequence number.
1308 * @cmd: Whether it is invoked by ethtool cmd.
1309 * @mcam_idx: NPC mcam index corresponding to MAC
1310 * @pcifunc: PCI func.
1312 * Creates a new exact match table entry in either CAM or
1314 * Return: 0 upon success.
1316 static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
1317 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
1318 u32 mcam_idx, u16 pcifunc)
/*
 * Allocate a slot (CAM or 4-way MEM table, chosen by the allocator), write
 * the prepared entry data to HW, record the entry in the SW lists, and
 * enable the per-LMAC drop-on-hit rule when this is the first command rule.
 *
 * NOTE(review): original line numbering skips here — the error-path guards
 * (after the alloc/list-add/drop-rule-info calls) and their `return`s are
 * elided from this excerpt; verify control flow against the full source.
 */
1320 int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1321 enum npc_exact_opc_type opc_type;
1322 bool enable_cam = false;
/* Allocator decides backend (CAM vs. MEM) and fills index/ways/opc_type. */
1332 err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
1334 dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
1338 /* Write mdata to table */
1339 mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
1341 if (opc_type == NPC_EXACT_OPC_CAM)
1342 rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
/* MEM-table write — presumably the `else` arm (keyword elided above). */
1344 rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
1346 /* Insert entry to linked list */
1347 err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
1348 mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
/* On list-add failure, roll back the HW slot allocation. */
1350 rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1351 dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
1355 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1356 &drop_mcam_idx, NULL, NULL, NULL);
/* Drop-rule lookup failure also rolls back the slot allocation. */
1358 rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1359 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1360 __func__, cgx_id, lmac_id);
/* Increment command-rule count; sets enable_cam on the first rule. */
1365 __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
1367 /* First command rule; enable drop on hit rule */
1369 rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
1370 dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
1371 __func__, drop_mcam_idx);
1375 "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
1376 __func__, index, mac, ways, opc_type);
1382 * rvu_npc_exact_update_table_entry - Update exact match table.
1383 * @rvu: resource virtualization unit.
1384 * @cgx_id: CGX identifier.
1385 * @lmac_id: LMAC identifier.
1386 * @old_mac: Existing MAC address entry.
1387 * @new_mac: New MAC address entry.
1388 * @seq_id: Sequence identifier of the entry.
1390 * Updates MAC address of an entry. If entry is in MEM table, new
1391 * hash value may not match with old one.
1392 * Return: 0 upon success.
1394 static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
1395 u8 *old_mac, u8 *new_mac, u32 *seq_id)
/*
 * Replace the MAC address of an existing exact-match entry in place.
 * For MEM-table entries the new MAC must hash to the same bucket index;
 * otherwise the update is rejected (the caller then falls back to
 * delete-and-re-add, see rvu_npc_exact_mac_addr_update()).
 *
 * NOTE(review): original line numbering skips — the `if (!entry)` guard and
 * error `return`s are elided from this excerpt; verify against full source.
 */
1397 int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1398 struct npc_exact_table_entry *entry;
1399 struct npc_exact_table *table;
1403 table = rvu->hw->table;
1405 mutex_lock(&table->lock);
1407 /* Lookup for entry which needs to be updated */
1408 entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
/* Lookup-failure path (guard condition elided in this excerpt). */
1410 mutex_unlock(&table->lock);
1412 "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
1413 __func__, cgx_id, lmac_id, old_mac);
1417 /* If entry is in mem table and new hash index is different from old
1418 * hash index, we cannot update the entry. Fail in these scenarios.
1420 if (entry->opc_type == NPC_EXACT_OPC_MEM) {
/* Recompute bucket for the new MAC with the same channel/ctype/mask. */
1421 hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
1422 new_mac, table->mem_table.mask,
1423 table->mem_table.depth);
1424 if (hash_index != entry->index) {
1426 "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
1427 __func__, hash_index, entry->index);
1428 mutex_unlock(&table->lock);
/* Same slot is reusable: rewrite the HW entry with the new MAC. */
1433 mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
1435 if (entry->opc_type == NPC_EXACT_OPC_MEM)
1436 rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
/* CAM-table write — presumably the `else` arm (keyword elided above). */
1438 rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
1440 /* Update entry fields */
1441 ether_addr_copy(entry->mac, new_mac);
/* Report the (unchanged) sequence id back to the caller. */
1442 *seq_id = entry->seq_id;
1445 "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
1446 __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
1448 dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
1449 __func__, old_mac, new_mac);
1451 mutex_unlock(&table->lock);
1456 * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
1457 * @rvu: resource virtualization unit.
1460 * Drop rule is against each PF. We don't support DMAC filter for
1462 * Return: 0 upon success
1465 int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
/*
 * Leave promiscuous mode for the PF's LMAC by re-enabling its
 * drop-on-exact-match-miss MCAM rule. Fails with
 * LMAC_AF_ERR_INVALID_PARAM if promisc mode is already off.
 *
 * NOTE(review): original line numbering skips — the error-path guards and
 * the promisc-flag clear/`return` lines are elided from this excerpt.
 */
1467 struct npc_exact_table *table;
1468 int pf = rvu_get_pf(pcifunc);
1474 table = rvu->hw->table;
/* Map PF -> CGX/LMAC, then find that LMAC's drop rule index. */
1476 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1477 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1478 &drop_mcam_idx, NULL, NULL, NULL);
1480 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1481 __func__, cgx_id, lmac_id);
/* Promisc state is per drop rule and protected by the table lock. */
1485 mutex_lock(&table->lock);
1486 promisc = &table->promisc_mode[drop_mcam_idx];
/* Already-disabled path (guard condition elided in this excerpt). */
1489 mutex_unlock(&table->lock);
1490 dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
1491 __func__, cgx_id, lmac_id);
1492 return LMAC_AF_ERR_INVALID_PARAM;
1495 mutex_unlock(&table->lock);
1497 /* Enable drop rule */
1498 rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
1501 dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n",
1502 __func__, cgx_id, lmac_id);
1507 * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
1508 * @rvu: resource virtualization unit.
1509 * @pcifunc: pcifunc.
1510 * Return: 0 upon success
1512 int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
/*
 * Enter promiscuous mode for the PF's LMAC by disabling its
 * drop-on-exact-match-miss MCAM rule (mirror image of
 * rvu_npc_exact_promisc_disable()). Fails with LMAC_AF_ERR_INVALID_PARAM
 * if promisc mode is already on.
 *
 * NOTE(review): original line numbering skips — error-path guards and the
 * promisc-flag set/`return` lines are elided from this excerpt.
 */
1514 struct npc_exact_table *table;
1515 int pf = rvu_get_pf(pcifunc);
1521 table = rvu->hw->table;
/* Map PF -> CGX/LMAC, then find that LMAC's drop rule index. */
1523 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1524 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1525 &drop_mcam_idx, NULL, NULL, NULL);
1527 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1528 __func__, cgx_id, lmac_id);
/* Promisc state is per drop rule and protected by the table lock. */
1532 mutex_lock(&table->lock);
1533 promisc = &table->promisc_mode[drop_mcam_idx];
/* Already-enabled path (guard condition elided in this excerpt). */
1536 mutex_unlock(&table->lock);
1537 dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
1538 __func__, cgx_id, lmac_id);
1539 return LMAC_AF_ERR_INVALID_PARAM;
1542 mutex_unlock(&table->lock);
1544 /* disable drop rule */
1545 rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
1548 dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
1549 __func__, cgx_id, lmac_id);
1554 * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
1555 * @rvu: resource virtualization unit.
1556 * @req: Reset request
1557 * @rsp: Reset response.
1558 * Return: 0 upon success
1560 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1561 struct msg_rsp *rsp)
/*
 * Mbox handler: delete the PF's MAC filter entry identified by req->index
 * (a sequence id) from the exact-match table.
 *
 * NOTE(review): original line numbering skips — the `if (rc)` guard around
 * the error log and the final `return` are elided from this excerpt.
 */
1563 int pf = rvu_get_pf(req->hdr.pcifunc);
1564 u32 seq_id = req->index;
1565 struct rvu_pfvf *pfvf;
1569 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1571 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1573 rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1575 /* TODO: how to handle this error case ? */
1576 dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
1580 dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
1581 __func__, pfvf->mac_addr, pf, seq_id);
1586 * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
1587 * @rvu: resource virtualization unit.
1588 * @req: Update request.
1589 * @rsp: Update response.
1590 * Return: 0 upon success
1592 int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
1593 struct cgx_mac_addr_update_req *req,
1594 struct cgx_mac_addr_update_rsp *rsp)
/*
 * Mbox handler: update the MAC of the filter entry identified by
 * req->index. First tries an in-place update; if that fails (e.g. the new
 * MAC hashes to a different MEM-table bucket) it falls back to deleting
 * the entry and adding a fresh one, returning the new seq id in rsp->index.
 *
 * NOTE(review): original line numbering skips — several guards (permission
 * early-return body, `if (!entry)`, success/failure branches around the
 * update and re-add calls) are elided from this excerpt.
 */
1596 int pf = rvu_get_pf(req->hdr.pcifunc);
1597 struct npc_exact_table_entry *entry;
1598 struct npc_exact_table *table;
1599 struct rvu_pfvf *pfvf;
1600 u32 seq_id, mcam_idx;
1601 u8 old_mac[ETH_ALEN];
/* Only a permitted PF may reconfigure CGX MAC filters. */
1605 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1606 return LMAC_AF_ERR_PERM_DENIED;
1608 dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
1609 __func__, req->index, req->mac_addr);
1611 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1613 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1615 table = rvu->hw->table;
1617 mutex_lock(&table->lock);
1619 /* Lookup for entry which needs to be updated */
1620 entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
/* Lookup-failure path (guard condition elided in this excerpt). */
1622 dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
1623 mutex_unlock(&table->lock);
1624 return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
/* Snapshot entry fields under the lock for use after unlocking. */
1626 ether_addr_copy(old_mac, entry->mac);
1627 seq_id = entry->seq_id;
1628 mcam_idx = entry->mcam_idx;
1629 mutex_unlock(&table->lock);
/* Fast path: in-place MAC rewrite in the same HW slot. */
1631 rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
1632 req->mac_addr, &seq_id);
1634 rsp->index = seq_id;
1635 dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
1636 __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
1637 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1641 /* Try deleting and adding it again */
1642 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1644 /* This could be a new entry */
1645 dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
1646 pfvf->mac_addr, pf);
/* Re-add with the original mcam_idx so the MCAM mapping is preserved. */
1649 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1650 pfvf->rx_chan_base, 0, &seq_id, true,
1651 mcam_idx, req->hdr.pcifunc);
1653 dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
1655 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1658 rsp->index = seq_id;
1660 "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
1661 __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
1663 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1668 * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
1669 * @rvu: resource virtualization unit.
1670 * @req: Add request.
1671 * @rsp: Add response.
1672 * Return: 0 upon success
1674 int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
1675 struct cgx_mac_addr_add_req *req,
1676 struct cgx_mac_addr_add_rsp *rsp)
/*
 * Mbox handler: add req->mac_addr as a DMAC filter for the requesting PF,
 * returning the allocated sequence id in rsp->index. mcam_idx of -1 means
 * no pre-existing MCAM entry is associated with this filter.
 *
 * NOTE(review): original line numbering skips — the success/failure branch
 * around rc and the `return` statements are elided from this excerpt.
 */
1678 int pf = rvu_get_pf(req->hdr.pcifunc);
1679 struct rvu_pfvf *pfvf;
1684 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1685 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1687 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1688 pfvf->rx_chan_base, 0, &seq_id,
1689 true, -1, req->hdr.pcifunc);
1692 rsp->index = seq_id;
1693 dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
1694 __func__, req->mac_addr, pf, seq_id);
1698 dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
1700 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1704 * rvu_npc_exact_mac_addr_del - Delete DMAC filter
1705 * @rvu: resource virtualization unit.
1706 * @req: Delete request.
1707 * @rsp: Delete response.
1708 * Return: 0 upon success
1710 int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
1711 struct cgx_mac_addr_del_req *req,
1712 struct msg_rsp *rsp)
/*
 * Mbox handler: delete the DMAC filter entry identified by req->index
 * (a sequence id).
 *
 * NOTE(review): original line numbering skips — the success-branch guard
 * and `return 0` are elided from this excerpt.
 */
1714 int pf = rvu_get_pf(req->hdr.pcifunc);
1717 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1719 dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
1720 __func__, pf, req->index);
1724 dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
1725 __func__, pf, req->index);
1726 return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
1730 * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
1731 * @rvu: resource virtualization unit.
1732 * @req: Set request.
1733 * @rsp: Set response.
1734 * Return: 0 upon success
1736 int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
1737 struct cgx_mac_addr_set_or_get *rsp)
/*
 * Mbox handler: set the PF's primary MAC filter. First tries an in-place
 * update of the existing entry; on failure deletes it (if present), looks
 * up the PF's unicast MCAM index, and installs a fresh entry. The MAC is
 * echoed back in rsp->mac_addr and the seq id in rsp->index.
 *
 * NOTE(review): original line numbering skips — the success/failure guards
 * around the update, delete, nixlf-lookup and add calls are elided from
 * this excerpt.
 */
1739 int pf = rvu_get_pf(req->hdr.pcifunc);
1740 u32 seq_id = req->index;
1741 struct rvu_pfvf *pfvf;
1746 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1748 pfvf = &rvu->pf[pf];
1750 /* If the table does not have an entry, both the update-entry and the
1751 * del-table-entry APIs below fail. Those are not failure conditions.
/* Fast path: rewrite the existing entry in place with the new MAC. */
1753 rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
1754 req->mac_addr, &seq_id);
1756 rsp->index = seq_id;
1757 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1758 ether_addr_copy(rsp->mac_addr, req->mac_addr);
1759 dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
1760 __func__, req->mac_addr, pf);
1764 /* Try deleting and adding it again */
1765 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1767 dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
1768 __func__, pfvf->mac_addr, pf);
1771 /* find mcam entry if exist */
1772 rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
1774 mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
1775 nixlf, NIXLF_UCAST_ENTRY);
/* Install a fresh entry tied to the PF's unicast MCAM index. */
1778 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1779 pfvf->rx_chan_base, 0, &seq_id,
1780 true, mcam_idx, req->hdr.pcifunc);
1782 dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
1783 __func__, req->mac_addr, pf);
1784 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1787 rsp->index = seq_id;
1788 ether_addr_copy(rsp->mac_addr, req->mac_addr);
1789 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1791 "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
1792 __func__, req->mac_addr, pf, seq_id);
1797 * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
1798 * @rvu: resource virtualization unit.
1799 * Return: True if exact match feature is supported.
1801 bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
/*
 * The feature can be disabled only while no exact-match entries exist:
 * checks the global entry list for emptiness under the table lock.
 * If the feature is not enabled at all, there is nothing to disable.
 * (The early-return body and final `return empty;` are elided from this
 * excerpt — original line numbering skips.)
 */
1803 struct npc_exact_table *table = rvu->hw->table;
1806 if (!rvu->hw->cap.npc_exact_match_enabled)
1809 mutex_lock(&table->lock);
1810 empty = list_empty(&table->lhead_gbl);
1811 mutex_unlock(&table->lock);
1817 * rvu_npc_exact_disable_feature - Disable feature.
1818 * @rvu: resource virtualization unit.
1820 void rvu_npc_exact_disable_feature(struct rvu *rvu)
/* Simply clears the capability flag; callers gate exact-match paths on it. */
1822 rvu->hw->cap.npc_exact_match_enabled = false;
1826 * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
1827 * @rvu: resource virtualization unit.
1828 * @pcifunc: PCI func to match.
1830 void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
/*
 * Delete every exact-match entry owned by @pcifunc. The lock is dropped
 * around each rvu_npc_exact_del_table_entry_by_id() call because that
 * helper takes the same table lock itself; list_for_each_entry_safe makes
 * the removal of the current node safe. (The `continue` for non-matching
 * entries is elided from this excerpt — original line numbering skips.)
 */
1832 struct npc_exact_table *table = rvu->hw->table;
1833 struct npc_exact_table_entry *tmp, *iter;
1836 mutex_lock(&table->lock);
1837 list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
1838 if (pcifunc != iter->pcifunc)
1841 seq_id = iter->seq_id;
1842 dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
/* Drop the lock so the delete helper can re-acquire it. */
1845 mutex_unlock(&table->lock);
1846 rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1847 mutex_lock(&table->lock);
1849 mutex_unlock(&table->lock);
1853 * rvu_npc_exact_init - initialize exact match table
1854 * @rvu: resource virtualization unit.
1856 * Initialize HW and SW resources to manage 4way-2K table and fully
1857 * associative 32-entry mcam table.
1858 * Return: 0 upon success.
1860 int rvu_npc_exact_init(struct rvu *rvu)
1862 u64 bcast_mcast_val, bcast_mcast_mask;
1863 struct npc_exact_table *table;
1864 u64 exact_val, exact_mask;
1865 u64 chan_val, chan_mask;
1877 /* Read NPC_AF_CONST3 and check whether the exact
1878 * match functionality is present
1880 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1882 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
1886 /* Check exact match feature is supported */
1887 npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
1888 if (!(npc_const3 & BIT_ULL(62)))
1891 /* Check if kex profile has enabled EXACT match nibble */
1892 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1893 if (!(cfg & NPC_EXACT_NIBBLE_HIT))
1896 /* Set capability to true */
1897 rvu->hw->cap.npc_exact_match_enabled = true;
1899 table = kzalloc(sizeof(*table), GFP_KERNEL);
1903 dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
1904 rvu->hw->table = table;
1906 /* Read table size, ways and depth */
1907 table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
1908 table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
1909 table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
1911 dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
1912 __func__, table->mem_table.ways, table->cam_table.depth);
1914 /* Check if depth of table is not a power of 2
1915 * TODO: why __builtin_popcount() is not working ?
1917 if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
1919 "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
1920 __func__, table->mem_table.depth);
1924 table_size = table->mem_table.depth * table->mem_table.ways;
1926 /* Allocate bitmap for 4way 2K table */
1927 table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
1929 if (!table->mem_table.bmap)
1932 dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
1934 /* Allocate bitmap for 32 entry mcam */
1935 table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
1937 if (!table->cam_table.bmap)
1940 dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
1942 table->tot_ids = table_size + table->cam_table.depth;
1943 table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
1946 if (!table->id_bmap)
1949 dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
1950 __func__, table->tot_ids);
1952 /* Initialize list heads for npc_exact_table entries.
1953 * This entry is used by debugfs to show entries in
1954 * exact match table.
1956 for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
1957 INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
1959 INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
1960 INIT_LIST_HEAD(&table->lhead_gbl);
1962 mutex_init(&table->lock);
1964 rvu_exact_config_secret_key(rvu);
1965 rvu_exact_config_search_key(rvu);
1967 rvu_exact_config_table_mask(rvu);
1968 rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
1970 /* - No drop rule for LBK
1971 * - Drop rules for SDP and each LMAC.
1973 exact_val = !NPC_EXACT_RESULT_HIT;
1974 exact_mask = NPC_EXACT_RESULT_HIT;
1979 bcast_mcast_val = 0b0000;
1980 bcast_mcast_mask = 0b0011;
1982 /* Install SDP drop rule */
1983 drop_mcam_idx = &table->num_drop_rules;
1985 max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
1988 for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
1989 if (rvu->pf2cgxlmac_map[i] == 0xFF)
1992 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
1994 rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
1995 lmac_id, &chan_val, &chan_mask);
1998 "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
1999 __func__, chan_val, chan_mask, *drop_mcam_idx);
2003 /* Filter rules are only for PF */
2004 pcifunc = RVU_PFFUNC(i, 0);
2007 "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
2008 __func__, cgx_id, lmac_id, chan_val, chan_mask);
2010 rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
2011 chan_val, chan_mask, pcifunc);
2014 "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
2015 __func__, cgx_id, lmac_id, chan_val);
2019 err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
2020 &table->counter_idx[*drop_mcam_idx],
2021 chan_val, chan_mask,
2022 exact_val, exact_mask,
2023 bcast_mcast_val, bcast_mcast_mask);
2026 "failed to configure drop rule (cgx=%d lmac=%d)\n",
2034 dev_info(rvu->dev, "initialized exact match table successfully\n");