octeontx2-af: Add mbox messages to install and delete MCAM rules
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"

#define NPC_BYTESM              GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET          GENMASK_ULL(15, 8)
#define NPC_KEY_OFFSET          GENMASK_ULL(5, 0)
#define NPC_LDATA_EN            BIT_ULL(7)

static const char * const npc_flow_names[] = {
        [NPC_DMAC]      = "dmac",
        [NPC_SMAC]      = "smac",
        [NPC_ETYPE]     = "ether type",
        [NPC_OUTER_VID] = "outer vlan id",
        [NPC_TOS]       = "tos",
        [NPC_SIP_IPV4]  = "ipv4 source ip",
        [NPC_DIP_IPV4]  = "ipv4 destination ip",
        [NPC_SIP_IPV6]  = "ipv6 source ip",
        [NPC_DIP_IPV6]  = "ipv6 destination ip",
        [NPC_SPORT_TCP] = "tcp source port",
        [NPC_DPORT_TCP] = "tcp destination port",
        [NPC_SPORT_UDP] = "udp source port",
        [NPC_DPORT_UDP] = "udp destination port",
        [NPC_SPORT_SCTP] = "sctp source port",
        [NPC_DPORT_SCTP] = "sctp destination port",
        [NPC_UNKNOWN]   = "unknown",
};

const char *npc_get_field_name(u8 hdr)
{
        if (hdr >= ARRAY_SIZE(npc_flow_names))
                return npc_flow_names[NPC_UNKNOWN];

        return npc_flow_names[hdr];
}

/* Compute keyword masks and figure out the number of keywords a field
 * spans in the key.
 */
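/* Example: a 16-bit field at key byte offset 15 starts in KW1 at bit 56
 * (offset 56, nr_bits 16) and spills into KW2, so kw_mask[1] covers bits
 * 63:56, kw_mask[2] covers bits 7:0 and nr_kws is 2.
 */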
static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
                             u8 nr_bits, int start_kwi, int offset, u8 intf)
{
        struct npc_key_field *field = &mcam->rx_key_fields[type];
        u8 bits_in_kw;
        int max_kwi;

        if (mcam->banks_per_entry == 1)
                max_kwi = 1; /* NPC_MCAM_KEY_X1 */
        else if (mcam->banks_per_entry == 2)
                max_kwi = 3; /* NPC_MCAM_KEY_X2 */
        else
                max_kwi = 6; /* NPC_MCAM_KEY_X4 */

        if (is_npc_intf_tx(intf))
                field = &mcam->tx_key_fields[type];

        if (offset + nr_bits <= 64) {
                /* one KW only */
                if (start_kwi > max_kwi)
                        return;
                field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
                                             << offset;
                field->nr_kws = 1;
        } else if (offset + nr_bits > 64 &&
                   offset + nr_bits <= 128) {
                /* two KWs */
                if (start_kwi + 1 > max_kwi)
                        return;
                /* first KW mask */
                bits_in_kw = 64 - offset;
                field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
                                             << offset;
                /* second KW mask i.e. mask for rest of bits */
                bits_in_kw = nr_bits + offset - 64;
                field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
                field->nr_kws = 2;
        } else {
                /* three KWs */
                if (start_kwi + 2 > max_kwi)
                        return;
                /* first KW mask */
                bits_in_kw = 64 - offset;
                field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
                                             << offset;
                /* second KW mask */
                field->kw_mask[start_kwi + 1] = ~0ULL;
                /* third KW mask i.e. mask for rest of bits */
                bits_in_kw = nr_bits + offset - 128;
                field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
                field->nr_kws = 3;
        }
}

/* Helper function to figure out whether a field is present in the key */
static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct npc_key_field *input;

        input = &mcam->rx_key_fields[type];
        if (is_npc_intf_tx(intf))
                input = &mcam->tx_key_fields[type];

        return input->nr_kws > 0;
}

static bool npc_is_same(struct npc_key_field *input,
                        struct npc_key_field *field)
{
        int ret;

        ret = memcmp(&input->layer_mdata, &field->layer_mdata,
                     sizeof(struct npc_layer_mdata));
        return ret == 0;
}

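/* Decode an NPC_AF_INTF(x)_LID(x)_LT(x)_LD(x)_CFG value into layer metadata:
 * header byte offset, key byte offset and extracted length in bytes
 * (NPC_BYTESM holds length minus one).
 */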
static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
                                u64 cfg, u8 lid, u8 lt, u8 intf)
{
        struct npc_key_field *input = &mcam->rx_key_fields[type];

        if (is_npc_intf_tx(intf))
                input = &mcam->tx_key_fields[type];

        input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
        input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
        input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
        input->layer_mdata.ltype = lt;
        input->layer_mdata.lid = lid;
}

static bool npc_check_overlap_fields(struct npc_key_field *input1,
                                     struct npc_key_field *input2)
{
        int kwi;

        /* Fields with the same layer id and different ltypes are mutually
         * exclusive, hence they are allowed to overlap.
         */
        if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
            input1->layer_mdata.ltype != input2->layer_mdata.ltype)
                return false;

        for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
                if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
                        return true;
        }

        return false;
}

/* Helper function to check whether a given field overlaps with any other
 * field in the key. Due to limitations on key size and the key extraction
 * profile in use, higher layers can overwrite a lower layer's header fields,
 * hence the overlap needs to be checked.
 */
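/* An overlap is only reported for extractions that can appear in the same
 * packet, i.e. ones that do not share a layer id with differing ltypes.
 */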
static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
                              enum key_fields type, u8 start_lid, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct npc_key_field *dummy, *input;
        int start_kwi, offset;
        u8 nr_bits, lid, lt, ld;
        u64 cfg;

        dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
        input = &mcam->rx_key_fields[type];

        if (is_npc_intf_tx(intf)) {
                dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
                input = &mcam->tx_key_fields[type];
        }

        for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
                for (lt = 0; lt < NPC_MAX_LT; lt++) {
                        for (ld = 0; ld < NPC_MAX_LD; ld++) {
                                cfg = rvu_read64(rvu, blkaddr,
                                                 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
                                                 (intf, lid, lt, ld));
                                if (!FIELD_GET(NPC_LDATA_EN, cfg))
                                        continue;
                                memset(dummy, 0, sizeof(struct npc_key_field));
                                npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
                                                    lid, lt, intf);
                                /* exclude input */
                                if (npc_is_same(input, dummy))
                                        continue;
                                start_kwi = dummy->layer_mdata.key / 8;
                                offset = (dummy->layer_mdata.key * 8) % 64;
                                nr_bits = dummy->layer_mdata.len * 8;
                                /* form KW masks */
                                npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
                                                 start_kwi, offset, intf);
                                /* check if any input field bits fall within
                                 * any other field's bits.
                                 */
                                if (npc_check_overlap_fields(dummy, input))
                                        return true;
                        }
                }
        }

        return false;
}

static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
                           u8 intf)
{
        if (!npc_is_field_present(rvu, type, intf) ||
            npc_check_overlap(rvu, blkaddr, type, 0, intf))
                return -EOPNOTSUPP;
        return 0;
}

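/* A bit number here is a bit position in NPC_PARSE_NIBBLE: bits 0-2 select
 * the channel nibbles, bit 3 the error level, bits 4-5 the error code,
 * bit 6 LXMB, and bits 9, 12, ... 30 the LTYPE nibble of layers LA-LH.
 * The remaining per-layer nibbles are not handled yet.
 */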
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
                                  u8 key_nibble, u8 intf)
{
        u8 offset = (key_nibble * 4) % 64; /* offset within key word */
        u8 kwi = (key_nibble * 4) / 64; /* which word in key */
        u8 nr_bits = 4; /* bits in a nibble */
        u8 type;

        switch (bit_number) {
        case 0 ... 2:
                type = NPC_CHAN;
                break;
        case 3:
                type = NPC_ERRLEV;
                break;
        case 4 ... 5:
                type = NPC_ERRCODE;
                break;
        case 6:
                type = NPC_LXMB;
                break;
        /* check for LTYPE only as of now */
        case 9:
                type = NPC_LA;
                break;
        case 12:
                type = NPC_LB;
                break;
        case 15:
                type = NPC_LC;
                break;
        case 18:
                type = NPC_LD;
                break;
        case 21:
                type = NPC_LE;
                break;
        case 24:
                type = NPC_LF;
                break;
        case 27:
                type = NPC_LG;
                break;
        case 30:
                type = NPC_LH;
                break;
        default:
                return;
        }
        npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct npc_key_field *key_fields;
        /* Ether type can come from three layers
         * (ethernet, single tagged, double tagged)
         */
        struct npc_key_field *etype_ether;
        struct npc_key_field *etype_tag1;
        struct npc_key_field *etype_tag2;
        /* Outer VLAN TCI can come from two layers
         * (single tagged, double tagged)
         */
        struct npc_key_field *vlan_tag1;
        struct npc_key_field *vlan_tag2;
        u64 *features;
        u8 start_lid;
        int i;

        key_fields = mcam->rx_key_fields;
        features = &mcam->rx_features;

        if (is_npc_intf_tx(intf)) {
                key_fields = mcam->tx_key_fields;
                features = &mcam->tx_features;
        }

        /* Handle header fields which can come from multiple layers, like
         * ethertype and outer VLAN TCI. These fields must sit at the same
         * position in the key for every layer; otherwise installing a single
         * MCAM rule would need more than one entry, which complicates MCAM
         * space management.
         */
        etype_ether = &key_fields[NPC_ETYPE_ETHER];
        etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
        etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
        vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
        vlan_tag2 = &key_fields[NPC_VLAN_TAG2];

        /* if the programmed key profile does not extract Ethertype at all */
        if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
                goto vlan_tci;

        /* if the key profile extracts Ethertype from one layer */
        if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
                key_fields[NPC_ETYPE] = *etype_ether;
        if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
                key_fields[NPC_ETYPE] = *etype_tag1;
        if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
                key_fields[NPC_ETYPE] = *etype_tag2;

        /* if the key profile extracts Ethertype from multiple layers */
        if (etype_ether->nr_kws && etype_tag1->nr_kws) {
                for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
                        if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
                                goto vlan_tci;
                }
                key_fields[NPC_ETYPE] = *etype_tag1;
        }
        if (etype_ether->nr_kws && etype_tag2->nr_kws) {
                for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
                        if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
                                goto vlan_tci;
                }
                key_fields[NPC_ETYPE] = *etype_tag2;
        }
        if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
                for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
                        if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
                                goto vlan_tci;
                }
                key_fields[NPC_ETYPE] = *etype_tag2;
        }

        /* check that none of the higher layers overwrite Ethertype */
        start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
        if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
                goto vlan_tci;
        *features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
        /* if the key profile does not extract the outer vlan tci at all */
        if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
                goto done;

        /* if the key profile extracts the outer vlan tci from one layer */
        if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
                key_fields[NPC_OUTER_VID] = *vlan_tag1;
        if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
                key_fields[NPC_OUTER_VID] = *vlan_tag2;

        /* if the key profile extracts the outer vlan tci from multiple layers */
        if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
                for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
                        if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
                                goto done;
                }
                key_fields[NPC_OUTER_VID] = *vlan_tag2;
        }
        /* check that none of the higher layers overwrite the outer vlan tci */
        start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
        if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
                goto done;
        *features |= BIT_ULL(NPC_OUTER_VID);
done:
        return;
}

static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
                           u8 lt, u64 cfg, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u8 hdr, key, nr_bytes, bit_offset;
        u8 la_ltype, la_start;
        /* starting KW index and starting bit position */
        int start_kwi, offset;

        nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
        hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
        key = FIELD_GET(NPC_KEY_OFFSET, cfg);
        start_kwi = key / 8;
        offset = (key * 8) % 64;

        /* For Tx, Layer A has NIX_INST_HDR_S (64 bits, i.e. 8 bytes)
         * preceding the ethernet header.
         */
        if (is_npc_intf_tx(intf)) {
                la_ltype = NPC_LT_LA_IH_NIX_ETHER;
                la_start = 8;
        } else {
                la_ltype = NPC_LT_LA_ETHER;
                la_start = 0;
        }

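/* NPC_SCAN_HDR(): if the (lid, lt) extractor pulls the bytes of the named
 * header field into the key, record its layer metadata and KW masks.
 * bit_offset is the distance, in bits, from the end of the field to the end
 * of the extracted window, i.e. (hdr + nr_bytes - hstart - hlen) * 8.
 */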
#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)                            \
do {                                                                           \
        if (lid == (hlid) && lt == (hlt)) {                                    \
                if ((hstart) >= hdr &&                                         \
                    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {                 \
                        bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
                        npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
                        npc_set_kw_masks(mcam, (name), (hlen) * 8,             \
                                         start_kwi, offset + bit_offset, intf);\
                }                                                              \
        }                                                                      \
} while (0)

        /* List the LID, LTYPE, start offset within the layer and length
         * (in bytes) of the packet header fields below.
         * Example: the source IP is 4 bytes and starts at the 12th byte of
         * the IP header.
         */
        NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
        NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
        NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
        NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
        NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
        NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
        NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
        NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
        NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
        NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
        NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
        NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
        NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
        NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
        NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
        NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
        NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
        /* PF_FUNC is 2 bytes at the 0th byte of NPC_LT_LA_IH_NIX_ETHER */
        NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}

static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u64 *features = &mcam->rx_features;
        u64 tcp_udp_sctp;
        int err, hdr;

        if (is_npc_intf_tx(intf))
                features = &mcam->tx_features;

        for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
                err = npc_check_field(rvu, blkaddr, hdr, intf);
                if (!err)
                        *features |= BIT_ULL(hdr);
        }

        tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
                       BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
                       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);

        /* for tcp/udp/sctp the corresponding layer type must be in the key */
        if (*features & tcp_udp_sctp)
                if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
                        *features &= ~tcp_udp_sctp;

        /* for vlan the corresponding layer type must be in the key */
        if (*features & BIT_ULL(NPC_OUTER_VID))
                if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
                        *features &= ~BIT_ULL(NPC_OUTER_VID);
}

/* Scan the key extraction profile and record how the fields of our interest
 * fill the key structure. Also verify that Channel and DMAC exist in the
 * key and are not overwritten by other header fields.
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u8 lid, lt, ld, bitnr;
        u8 key_nibble = 0;
        u64 cfg;

        /* Scan and note how the parse result is placed in the key.
         * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble of the
         * parse result in the key. The enabled nibbles of the parse result
         * are concatenated in the key.
         */
        cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
        cfg &= NPC_PARSE_NIBBLE;
        for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
                npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
                key_nibble++;
        }

        /* Scan and note how layer data is placed in the key */
        for (lid = 0; lid < NPC_MAX_LID; lid++) {
                for (lt = 0; lt < NPC_MAX_LT; lt++) {
                        for (ld = 0; ld < NPC_MAX_LD; ld++) {
                                cfg = rvu_read64(rvu, blkaddr,
                                                 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
                                                 (intf, lid, lt, ld));
                                if (!FIELD_GET(NPC_LDATA_EN, cfg))
                                        continue;
                                npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
                                               intf);
                        }
                }
        }

        return 0;
}

static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{
        int err;

        err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
        if (err)
                return err;

        err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
        if (err)
                return err;

        /* Channel is mandatory */
        if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
                dev_err(rvu->dev, "Channel not present in Key\n");
                return -EINVAL;
        }
        /* check that none of the fields overwrite channel */
        if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
                dev_err(rvu->dev, "Channel cannot be overwritten\n");
                return -EINVAL;
        }
        /* DMAC should be present in key for unicast filter to work */
        if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
                dev_err(rvu->dev, "DMAC not present in Key\n");
                return -EINVAL;
        }
        /* check that none of the fields overwrite DMAC */
        if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
                dev_err(rvu->dev, "DMAC cannot be overwritten\n");
                return -EINVAL;
        }

        npc_set_features(rvu, blkaddr, NIX_INTF_TX);
        npc_set_features(rvu, blkaddr, NIX_INTF_RX);
        npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
        npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);

        return 0;
}

int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;

        INIT_LIST_HEAD(&mcam->mcam_rules);

        return npc_scan_verify_kex(rvu, blkaddr);
}

static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u64 *mcam_features = &mcam->rx_features;
        u64 unsupported;
        u8 bit;

        if (is_npc_intf_tx(intf))
                mcam_features = &mcam->tx_features;

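        /* Bits requested by the flow but not present in the supported
         * feature mask.
         */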
        unsupported = (*mcam_features ^ features) & ~(*mcam_features);
        if (unsupported) {
                dev_info(rvu->dev, "Unsupported flow(s):\n");
                for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
                        dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
                return -EOPNOTSUPP;
        }

        return 0;
}

/* npc_update_entry - Based on the masks generated during key scanning,
 * update the given entry with the value and mask for the field of interest.
 * At most 16 bytes of a packet header can be extracted by HW, hence the
 * lo and hi words are sufficient. When the field is 8 bytes or less, hi
 * must be 0 for both value and mask.
 *
 * For an exact match of the value the mask must be all 1s; any bit cleared
 * in the mask makes the corresponding value bit a don't care.
 */
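/* Example: for a 16-bit field whose kw_mask spans bits 63:56 of kw[1] and
 * bits 7:0 of kw[2], shift is 56, so the low 8 bits of the value land in
 * kw[1] and the remaining 8 bits (val_lo >> 8) land in kw[2].
 */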
static void npc_update_entry(struct rvu *rvu, enum key_fields type,
                             struct mcam_entry *entry, u64 val_lo,
                             u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct mcam_entry dummy = { {0} };
        struct npc_key_field *field;
        u64 kw1, kw2, kw3;
        u8 shift;
        int i;

        field = &mcam->rx_key_fields[type];
        if (is_npc_intf_tx(intf))
                field = &mcam->tx_key_fields[type];

        if (!field->nr_kws)
                return;

        for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
                if (!field->kw_mask[i])
                        continue;
                /* place key value in kw[x] */
                shift = __ffs64(field->kw_mask[i]);
                /* update entry value */
                kw1 = (val_lo << shift) & field->kw_mask[i];
                dummy.kw[i] = kw1;
                /* update entry mask */
                kw1 = (mask_lo << shift) & field->kw_mask[i];
                dummy.kw_mask[i] = kw1;

                if (field->nr_kws == 1)
                        break;
                /* place remaining bits of key value in kw[x + 1] */
                if (field->nr_kws == 2) {
                        /* update entry value */
                        kw2 = shift ? val_lo >> (64 - shift) : 0;
                        kw2 |= (val_hi << shift);
                        kw2 &= field->kw_mask[i + 1];
                        dummy.kw[i + 1] = kw2;
                        /* update entry mask */
                        kw2 = shift ? mask_lo >> (64 - shift) : 0;
                        kw2 |= (mask_hi << shift);
                        kw2 &= field->kw_mask[i + 1];
                        dummy.kw_mask[i + 1] = kw2;
                        break;
                }
                /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
                if (field->nr_kws == 3) {
                        /* update entry value */
                        kw2 = shift ? val_lo >> (64 - shift) : 0;
                        kw2 |= (val_hi << shift);
                        kw2 &= field->kw_mask[i + 1];
                        kw3 = shift ? val_hi >> (64 - shift) : 0;
                        kw3 &= field->kw_mask[i + 2];
                        dummy.kw[i + 1] = kw2;
                        dummy.kw[i + 2] = kw3;
                        /* update entry mask */
                        kw2 = shift ? mask_lo >> (64 - shift) : 0;
                        kw2 |= (mask_hi << shift);
                        kw2 &= field->kw_mask[i + 1];
                        kw3 = shift ? mask_hi >> (64 - shift) : 0;
                        kw3 &= field->kw_mask[i + 2];
                        dummy.kw_mask[i + 1] = kw2;
                        dummy.kw_mask[i + 2] = kw3;
                        break;
                }
        }
        /* dummy now holds the value and mask for the given key field;
         * clear the field's bits in the input entry and update it with those.
         */
        for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
                if (!field->kw_mask[i])
                        continue;
                entry->kw[i] &= ~field->kw_mask[i];
                entry->kw_mask[i] &= ~field->kw_mask[i];

                entry->kw[i] |= dummy.kw[i];
                entry->kw_mask[i] |= dummy.kw_mask[i];
        }
}

#define IPV6_WORDS     4

static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
                                 u64 features, struct flow_msg *pkt,
                                 struct flow_msg *mask,
                                 struct rvu_npc_mcam_rule *output, u8 intf)
{
        u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
        u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
        struct flow_msg *opkt = &output->packet;
        struct flow_msg *omask = &output->mask;
        u64 mask_lo, mask_hi;
        u64 val_lo, val_hi;

        /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
         * values to be programmed in MCAM should be as below:
         * val_high: 0xfe80000000000000
         * val_low: 0x2c6863fffe5e2d0a
         */
        if (features & BIT_ULL(NPC_SIP_IPV6)) {
                be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
                be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);

                mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
                mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
                val_hi = (u64)src_ip[0] << 32 | src_ip[1];
                val_lo = (u64)src_ip[2] << 32 | src_ip[3];

                npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
                                 mask_lo, mask_hi, intf);
                memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
                memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
        }
        if (features & BIT_ULL(NPC_DIP_IPV6)) {
                be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
                be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);

                mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
                mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
                val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
                val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];

                npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
                                 mask_lo, mask_hi, intf);
                memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
                memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
        }
}

static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
                            u64 features, struct flow_msg *pkt,
                            struct flow_msg *mask,
                            struct rvu_npc_mcam_rule *output, u8 intf)
{
        u64 dmac_mask = ether_addr_to_u64(mask->dmac);
        u64 smac_mask = ether_addr_to_u64(mask->smac);
        u64 dmac_val = ether_addr_to_u64(pkt->dmac);
        u64 smac_val = ether_addr_to_u64(pkt->smac);
        struct flow_msg *opkt = &output->packet;
        struct flow_msg *omask = &output->mask;

        if (!features)
                return;

        /* For tcp/udp/sctp the LTYPE must be present in the entry */
        if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
                npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
                                 0, ~0ULL, 0, intf);
        if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
                npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
                                 0, ~0ULL, 0, intf);
        if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
                npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
                                 0, ~0ULL, 0, intf);

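        /* Program the OR of the CTAG and STAG_QINQ ltype values and mask
         * only the bits common to both, so the entry matches either ltype.
         */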
        if (features & BIT_ULL(NPC_OUTER_VID))
                npc_update_entry(rvu, NPC_LB, entry,
                                 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
                                 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);

#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)       \
do {                                                                          \
        if (features & BIT_ULL((field))) {                                    \
                npc_update_entry(rvu, (field), entry, (val_lo), (val_hi),     \
                                 (mask_lo), (mask_hi), intf);                 \
                memcpy(&opkt->member, &pkt->member, sizeof(pkt->member));     \
                memcpy(&omask->member, &mask->member, sizeof(mask->member));  \
        }                                                                     \
} while (0)

        NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
        NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
        NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
                       ntohs(mask->etype), 0);
        NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
                       ntohl(mask->ip4src), 0);
        NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
                       ntohl(mask->ip4dst), 0);
        NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
                       ntohs(mask->sport), 0);
        NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
                       ntohs(mask->sport), 0);
        NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
                       ntohs(mask->dport), 0);
        NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
                       ntohs(mask->dport), 0);
        NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
                       ntohs(mask->sport), 0);
        NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
                       ntohs(mask->dport), 0);

        NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
                       ntohs(mask->vlan_tci), 0);

        npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
}

static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
                                                    u16 entry)
{
        struct rvu_npc_mcam_rule *iter;

        mutex_lock(&mcam->lock);
        list_for_each_entry(iter, &mcam->mcam_rules, list) {
                if (iter->entry == entry) {
                        mutex_unlock(&mcam->lock);
                        return iter;
                }
        }
        mutex_unlock(&mcam->lock);

        return NULL;
}

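/* Insert the rule into mcam_rules, keeping the list sorted by MCAM entry
 * index.
 */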
static void rvu_mcam_add_rule(struct npc_mcam *mcam,
                              struct rvu_npc_mcam_rule *rule)
{
        struct list_head *head = &mcam->mcam_rules;
        struct rvu_npc_mcam_rule *iter;

        mutex_lock(&mcam->lock);
        list_for_each_entry(iter, &mcam->mcam_rules, list) {
                if (iter->entry > rule->entry)
                        break;
                head = &iter->list;
        }

        list_add(&rule->list, head);
        mutex_unlock(&mcam->lock);
}

static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
                                              struct rvu_npc_mcam_rule *rule)
{
        struct npc_mcam_oper_counter_req free_req = { 0 };
        struct msg_rsp free_rsp;

        if (!rule->has_cntr)
                return;

        free_req.hdr.pcifunc = pcifunc;
        free_req.cntr = rule->cntr;

        rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
        rule->has_cntr = false;
}

static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
                                         struct rvu_npc_mcam_rule *rule,
                                         struct npc_install_flow_rsp *rsp)
{
        struct npc_mcam_alloc_counter_req cntr_req = { 0 };
        struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
        int err;

        cntr_req.hdr.pcifunc = pcifunc;
        cntr_req.contig = true;
        cntr_req.count = 1;

        /* Try to allocate a counter to track the stats of this rule. If the
         * counter cannot be allocated then proceed without one, since
         * counters are scarcer than entries.
         */
        err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
                                                      &cntr_rsp);
        if (!err && cntr_rsp.count) {
                rule->cntr = cntr_rsp.cntr;
                rule->has_cntr = true;
                rsp->counter = rule->cntr;
        } else {
                rsp->counter = err;
        }
}

static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct mcam_entry *entry,
                                struct npc_install_flow_req *req, u16 target)
{
        struct nix_rx_action action;

        npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
                         ~0ULL, 0, NIX_INTF_RX);

        *(u64 *)&action = 0x00;
        action.pf_func = target;
        action.op = req->op;
        action.index = req->index;
        action.match_id = req->match_id;
        action.flow_key_alg = req->flow_key_alg;

        if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
                action = pfvf->def_ucast_rule->rx_action;

        entry->action = *(u64 *)&action;

        /* VTAG0 starts at 0th byte of LID_B.
         * VTAG1 starts at 4th byte of LID_B.
         */
        entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
                             FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
                             FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
                             FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
                             FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
                             FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
                             FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
                             FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
}

static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct mcam_entry *entry,
                                struct npc_install_flow_req *req, u16 target)
{
        struct nix_tx_action action;

        npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
                         0, ~0ULL, 0, NIX_INTF_TX);

        *(u64 *)&action = 0x00;
        action.op = req->op;
        action.index = req->index;
        action.match_id = req->match_id;

        entry->action = *(u64 *)&action;

        /* VTAG0 starts at 0th byte of LID_B.
         * VTAG1 starts at 4th byte of LID_B.
         */
        entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
                             FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
                             FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
                             FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
                             FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
                             FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
                             FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
                             FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
}

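/* Build one MCAM entry for the requested flow and write it via the mbox
 * write-entry handler. For RX, the request may overwrite or append to the
 * PF/VF default unicast entry; rules installed by a VF also fold in the
 * match fields of its default unicast rule. Bookkeeping is kept in an
 * rvu_npc_mcam_rule linked into mcam->mcam_rules.
 */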
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
                            int nixlf, struct rvu_pfvf *pfvf,
                            struct npc_install_flow_req *req,
                            struct npc_install_flow_rsp *rsp, bool enable)
{
        struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
        u64 features, installed_features, missing_features = 0;
        struct npc_mcam_write_entry_req write_req = { 0 };
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct rvu_npc_mcam_rule dummy = { 0 };
        struct rvu_npc_mcam_rule *rule;
        bool new = false, msg_from_vf;
        u16 owner = req->hdr.pcifunc;
        struct msg_rsp write_rsp;
        struct mcam_entry *entry;
        int entry_index, err;

        msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);

        installed_features = req->features;
        features = req->features;
        entry = &write_req.entry_data;
        entry_index = req->entry;

        npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
                        req->intf);

        if (is_npc_intf_rx(req->intf))
                npc_update_rx_entry(rvu, pfvf, entry, req, target);
        else
                npc_update_tx_entry(rvu, pfvf, entry, req, target);

        /* Default unicast rules do not exist for TX */
        if (is_npc_intf_tx(req->intf))
                goto find_rule;

        if (def_ucast_rule)
                missing_features = (def_ucast_rule->features ^ features) &
                                        def_ucast_rule->features;

        if (req->default_rule && req->append) {
                /* add to default rule */
                if (missing_features)
                        npc_update_flow(rvu, entry, missing_features,
                                        &def_ucast_rule->packet,
                                        &def_ucast_rule->mask,
                                        &dummy, req->intf);
                enable = rvu_npc_write_default_rule(rvu, blkaddr,
                                                    nixlf, target,
                                                    pfvf->nix_rx_intf, entry,
                                                    &entry_index);
                installed_features = req->features | missing_features;
        } else if (req->default_rule && !req->append) {
                /* overwrite default rule */
                enable = rvu_npc_write_default_rule(rvu, blkaddr,
                                                    nixlf, target,
                                                    pfvf->nix_rx_intf, entry,
                                                    &entry_index);
        } else if (msg_from_vf) {
                /* normal rule - for a VF, fold the default unicast rule's
                 * fields into it as well
                 */
                npc_update_flow(rvu, entry, missing_features,
                                &def_ucast_rule->packet, &def_ucast_rule->mask,
                                &dummy, req->intf);
                installed_features = req->features | missing_features;
        }

find_rule:
        rule = rvu_mcam_find_rule(mcam, entry_index);
        if (!rule) {
                rule = kzalloc(sizeof(*rule), GFP_KERNEL);
                if (!rule)
                        return -ENOMEM;
                new = true;
        }
        /* no counter for default rule */
        if (req->default_rule)
                goto update_rule;

        /* allocate a new counter if the rule has no counter */
        if (req->set_cntr && !rule->has_cntr)
                rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);

        /* if the user wants to delete an existing counter for a rule then
         * free the counter
         */
        if (!req->set_cntr && rule->has_cntr)
                rvu_mcam_remove_counter_from_rule(rvu, owner, rule);

        write_req.hdr.pcifunc = owner;
        write_req.entry = req->entry;
        write_req.intf = req->intf;
        write_req.enable_entry = (u8)enable;
        /* if a counter is available then clear and use it */
        if (req->set_cntr && rule->has_cntr) {
                rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
                write_req.set_cntr = 1;
                write_req.cntr = rule->cntr;
        }

        err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
                                                    &write_rsp);
        if (err) {
                rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
                if (new)
                        kfree(rule);
                return err;
        }
update_rule:
        memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
        memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
        rule->entry = entry_index;
        memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
        if (is_npc_intf_tx(req->intf))
                memcpy(&rule->tx_action, &entry->action,
                       sizeof(struct nix_tx_action));
        rule->vtag_action = entry->vtag_action;
        rule->features = installed_features;
        rule->default_rule = req->default_rule;
        rule->owner = owner;
        rule->enable = enable;
        if (is_npc_intf_tx(req->intf))
                rule->intf = pfvf->nix_tx_intf;
        else
                rule->intf = pfvf->nix_rx_intf;

        if (new)
                rvu_mcam_add_rule(mcam, rule);
        if (req->default_rule)
                pfvf->def_ucast_rule = rule;

        return 0;
}

int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
                                      struct npc_install_flow_req *req,
                                      struct npc_install_flow_rsp *rsp)
{
        bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
        int blkaddr, nixlf, err;
        struct rvu_pfvf *pfvf;
        bool enable = true;
        u16 target;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0) {
                dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
                return -ENODEV;
        }

        if (!is_npc_interface_valid(rvu, req->intf))
                return -EINVAL;

        if (from_vf && req->default_rule)
                return NPC_MCAM_PERM_DENIED;

        /* Each PF/VF's info is maintained in struct rvu_pfvf. The rvu_pfvf
         * of the target PF/VF needs to be retrieved, hence derive the
         * target pcifunc accordingly.
         */

        /* AF installing for a PF/VF */
        if (!req->hdr.pcifunc)
                target = req->vf;
        /* PF installing for its VF */
        else if (!from_vf && req->vf)
                target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
        /* msg received from PF/VF */
        else
                target = req->hdr.pcifunc;

        if (npc_check_unsupported_flows(rvu, req->features, req->intf))
                return -EOPNOTSUPP;

        if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
                return -EINVAL;

        pfvf = rvu_get_pfvf(rvu, target);

        /* update req destination mac addr */
        if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
            is_zero_ether_addr(req->packet.dmac)) {
                ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
                eth_broadcast_addr((u8 *)&req->mask.dmac);
        }

        err = nix_get_nixlf(rvu, target, &nixlf, NULL);

        /* If the interface is uninitialized then do not enable the entry */
        if (err || (!req->default_rule && !pfvf->def_ucast_rule))
                enable = false;

        /* Packets reaching NPC on the Tx path imply that the NIXLF is
         * properly set up and transmitting, hence Tx rules can be enabled.
         */
        if (is_npc_intf_tx(req->intf))
                enable = true;

        /* Do not allow requests from uninitialized VFs */
        if (from_vf && !enable)
                return -EINVAL;

        /* If the message is from a VF then its flow must not overlap with
         * the reserved unicast flow.
         */
        if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
            pfvf->def_ucast_rule->features & req->features)
                return -EINVAL;

        return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
                                enable);
}

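/* Disable the rule's MCAM entry, free its counter and release the rule.
 * Default unicast rules are skipped here.
 */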
static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
                           u16 pcifunc)
{
        struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
        struct msg_rsp dis_rsp;

        if (rule->default_rule)
                return 0;

        if (rule->has_cntr)
                rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);

        dis_req.hdr.pcifunc = pcifunc;
        dis_req.entry = rule->entry;

        list_del(&rule->list);
        kfree(rule);

        return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
}

int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
                                     struct npc_delete_flow_req *req,
                                     struct msg_rsp *rsp)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct rvu_npc_mcam_rule *iter, *tmp;
        u16 pcifunc = req->hdr.pcifunc;
        struct list_head del_list;

        INIT_LIST_HEAD(&del_list);

        mutex_lock(&mcam->lock);
        list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
                if (iter->owner == pcifunc) {
                        /* All rules */
                        if (req->all) {
                                list_move_tail(&iter->list, &del_list);
                        /* Range of rules */
                        } else if (req->end && iter->entry >= req->start &&
                                   iter->entry <= req->end) {
                                list_move_tail(&iter->list, &del_list);
                        /* Single rule */
                        } else if (req->entry == iter->entry) {
                                list_move_tail(&iter->list, &del_list);
                                break;
                        }
                }
        }
        mutex_unlock(&mcam->lock);

        list_for_each_entry_safe(iter, tmp, &del_list, list) {
                /* clear the mcam entry target pcifunc */
                mcam->entry2target_pffunc[iter->entry] = 0x0;
                if (npc_delete_flow(rvu, iter, pcifunc))
                        dev_err(rvu->dev, "rule deletion failed for entry:%d\n",
                                iter->entry);
        }

        return 0;
}

void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
        struct rvu_npc_mcam_rule *def_ucast_rule;
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct rvu_npc_mcam_rule *rule;
        int blkaddr, bank, index;
        u64 def_action;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        def_ucast_rule = pfvf->def_ucast_rule;

        mutex_lock(&mcam->lock);
        list_for_each_entry(rule, &mcam->mcam_rules, list) {
                if (is_npc_intf_rx(rule->intf) &&
                    rule->rx_action.pf_func == target && !rule->enable) {
                        if (rule->default_rule) {
                                npc_enable_mcam_entry(rvu, mcam, blkaddr,
                                                      rule->entry, true);
                                rule->enable = true;
                                continue;
                        }

                        if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
                                if (!def_ucast_rule)
                                        continue;
                                /* Use the default unicast entry action */
                                rule->rx_action = def_ucast_rule->rx_action;
                                def_action = *(u64 *)&def_ucast_rule->rx_action;
                                bank = npc_get_bank(mcam, rule->entry);
                                rvu_write64(rvu, blkaddr,
                                            NPC_AF_MCAMEX_BANKX_ACTION
                                            (rule->entry, bank), def_action);
                        }

                        npc_enable_mcam_entry(rvu, mcam, blkaddr,
                                              rule->entry, true);
                        rule->enable = true;
                }
        }

        /* Enable MCAM entries installed by the PF with target as VF pcifunc */
        for (index = 0; index < mcam->bmap_entries; index++) {
                if (mcam->entry2target_pffunc[index] == target)
                        npc_enable_mcam_entry(rvu, mcam, blkaddr,
                                              index, true);
        }
        mutex_unlock(&mcam->lock);
}

void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr, index;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        mutex_lock(&mcam->lock);
        /* Disable MCAM entries installed by the PF with target as VF pcifunc */
        for (index = 0; index < mcam->bmap_entries; index++) {
                if (mcam->entry2target_pffunc[index] == target)
                        npc_enable_mcam_entry(rvu, mcam, blkaddr,
                                              index, false);
        }
        mutex_unlock(&mcam->lock);
}