1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6
7 #define ICE_ETH_DA_OFFSET               0
8 #define ICE_ETH_ETHTYPE_OFFSET          12
9 #define ICE_ETH_VLAN_TCI_OFFSET         14
10 #define ICE_MAX_VLAN_ID                 0xFFF
11 #define ICE_IPV6_ETHER_ID               0x86DD
12
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA (6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * A word on the hardcoded values:
20  * byte 0 = 0x2: to identify it as a locally administered DA MAC
21  * byte 6 = 0x2: to identify it as a locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *      In case of a VLAN filter the first two bytes define the ether type (0x8100)
24  *      and the remaining two bytes are a placeholder for programming a given VLAN ID
25  *      In case of an Ether type filter it is treated as a header without a VLAN tag
26  *      and bytes 12 and 13 are used to program a given Ether type instead
27  */
28 #define DUMMY_ETH_HDR_LEN               16
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
30                                                         0x2, 0, 0, 0, 0, 0,
31                                                         0x81, 0, 0, 0};
32
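/* Illustrative sketch (not part of the upstream file): how the hardcoded
 * bytes above are consumed when a rule is built.  A helper copies
 * dummy_eth_header into the rule buffer and then overwrites bytes 12-13
 * (Ether type) or bytes 14-15 (VLAN TCI) in network byte order.  The helper
 * name is hypothetical; ice_fill_sw_rule() later in this file does this for
 * real.
 */
static inline void ice_sketch_set_vlan(u8 *eth_hdr, u16 vlan_id)
{
        __be16 *off;

        memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN);
        /* bytes 12-13 already carry 0x8100; program the VLAN ID at 14-15 */
        off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
        *off = cpu_to_be16(vlan_id & ICE_MAX_VLAN_ID);
}
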
33 struct ice_dummy_pkt_offsets {
34         enum ice_protocol_type type;
35         u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
36 };
37
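/* Illustrative sketch (not upstream code): each offsets table below is
 * terminated by an ICE_PROTOCOL_LAST entry, so locating where a given header
 * starts inside a dummy packet is a linear walk.  The helper name is made up
 * for illustration only.
 */
static inline int
ice_sketch_pkt_offset(const struct ice_dummy_pkt_offsets *offsets,
                      enum ice_protocol_type type, u16 *offset)
{
        for (; offsets->type != ICE_PROTOCOL_LAST; offsets++)
                if (offsets->type == type) {
                        *offset = offsets->offset;
                        return 0;
                }

        return -ENOENT;
}
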
38 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
39         { ICE_MAC_OFOS,         0 },
40         { ICE_ETYPE_OL,         12 },
41         { ICE_IPV4_OFOS,        14 },
42         { ICE_NVGRE,            34 },
43         { ICE_MAC_IL,           42 },
44         { ICE_IPV4_IL,          56 },
45         { ICE_TCP_IL,           76 },
46         { ICE_PROTOCOL_LAST,    0 },
47 };
48
49 static const u8 dummy_gre_tcp_packet[] = {
50         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
51         0x00, 0x00, 0x00, 0x00,
52         0x00, 0x00, 0x00, 0x00,
53
54         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
55
56         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
57         0x00, 0x00, 0x00, 0x00,
58         0x00, 0x2F, 0x00, 0x00,
59         0x00, 0x00, 0x00, 0x00,
60         0x00, 0x00, 0x00, 0x00,
61
62         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
63         0x00, 0x00, 0x00, 0x00,
64
65         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
66         0x00, 0x00, 0x00, 0x00,
67         0x00, 0x00, 0x00, 0x00,
68         0x08, 0x00,
69
70         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
71         0x00, 0x00, 0x00, 0x00,
72         0x00, 0x06, 0x00, 0x00,
73         0x00, 0x00, 0x00, 0x00,
74         0x00, 0x00, 0x00, 0x00,
75
76         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
77         0x00, 0x00, 0x00, 0x00,
78         0x00, 0x00, 0x00, 0x00,
79         0x50, 0x02, 0x20, 0x00,
80         0x00, 0x00, 0x00, 0x00
81 };
82
83 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
84         { ICE_MAC_OFOS,         0 },
85         { ICE_ETYPE_OL,         12 },
86         { ICE_IPV4_OFOS,        14 },
87         { ICE_NVGRE,            34 },
88         { ICE_MAC_IL,           42 },
89         { ICE_IPV4_IL,          56 },
90         { ICE_UDP_ILOS,         76 },
91         { ICE_PROTOCOL_LAST,    0 },
92 };
93
94 static const u8 dummy_gre_udp_packet[] = {
95         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
96         0x00, 0x00, 0x00, 0x00,
97         0x00, 0x00, 0x00, 0x00,
98
99         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
100
101         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
102         0x00, 0x00, 0x00, 0x00,
103         0x00, 0x2F, 0x00, 0x00,
104         0x00, 0x00, 0x00, 0x00,
105         0x00, 0x00, 0x00, 0x00,
106
107         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
108         0x00, 0x00, 0x00, 0x00,
109
110         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
111         0x00, 0x00, 0x00, 0x00,
112         0x00, 0x00, 0x00, 0x00,
113         0x08, 0x00,
114
115         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
116         0x00, 0x00, 0x00, 0x00,
117         0x00, 0x11, 0x00, 0x00,
118         0x00, 0x00, 0x00, 0x00,
119         0x00, 0x00, 0x00, 0x00,
120
121         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
122         0x00, 0x08, 0x00, 0x00,
123 };
124
125 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
126         { ICE_MAC_OFOS,         0 },
127         { ICE_ETYPE_OL,         12 },
128         { ICE_IPV4_OFOS,        14 },
129         { ICE_UDP_OF,           34 },
130         { ICE_VXLAN,            42 },
131         { ICE_GENEVE,           42 },
132         { ICE_VXLAN_GPE,        42 },
133         { ICE_MAC_IL,           50 },
134         { ICE_IPV4_IL,          64 },
135         { ICE_TCP_IL,           84 },
136         { ICE_PROTOCOL_LAST,    0 },
137 };
138
139 static const u8 dummy_udp_tun_tcp_packet[] = {
140         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
141         0x00, 0x00, 0x00, 0x00,
142         0x00, 0x00, 0x00, 0x00,
143
144         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
145
146         0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
147         0x00, 0x01, 0x00, 0x00,
148         0x40, 0x11, 0x00, 0x00,
149         0x00, 0x00, 0x00, 0x00,
150         0x00, 0x00, 0x00, 0x00,
151
152         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
153         0x00, 0x46, 0x00, 0x00,
154
155         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
156         0x00, 0x00, 0x00, 0x00,
157
158         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
159         0x00, 0x00, 0x00, 0x00,
160         0x00, 0x00, 0x00, 0x00,
161         0x08, 0x00,
162
163         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
164         0x00, 0x01, 0x00, 0x00,
165         0x40, 0x06, 0x00, 0x00,
166         0x00, 0x00, 0x00, 0x00,
167         0x00, 0x00, 0x00, 0x00,
168
169         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
170         0x00, 0x00, 0x00, 0x00,
171         0x00, 0x00, 0x00, 0x00,
172         0x50, 0x02, 0x20, 0x00,
173         0x00, 0x00, 0x00, 0x00
174 };
175
176 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
177         { ICE_MAC_OFOS,         0 },
178         { ICE_ETYPE_OL,         12 },
179         { ICE_IPV4_OFOS,        14 },
180         { ICE_UDP_OF,           34 },
181         { ICE_VXLAN,            42 },
182         { ICE_GENEVE,           42 },
183         { ICE_VXLAN_GPE,        42 },
184         { ICE_MAC_IL,           50 },
185         { ICE_IPV4_IL,          64 },
186         { ICE_UDP_ILOS,         84 },
187         { ICE_PROTOCOL_LAST,    0 },
188 };
189
190 static const u8 dummy_udp_tun_udp_packet[] = {
191         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
192         0x00, 0x00, 0x00, 0x00,
193         0x00, 0x00, 0x00, 0x00,
194
195         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
196
197         0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
198         0x00, 0x01, 0x00, 0x00,
199         0x00, 0x11, 0x00, 0x00,
200         0x00, 0x00, 0x00, 0x00,
201         0x00, 0x00, 0x00, 0x00,
202
203         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
204         0x00, 0x3a, 0x00, 0x00,
205
206         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
207         0x00, 0x00, 0x00, 0x00,
208
209         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
210         0x00, 0x00, 0x00, 0x00,
211         0x00, 0x00, 0x00, 0x00,
212         0x08, 0x00,
213
214         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
215         0x00, 0x01, 0x00, 0x00,
216         0x00, 0x11, 0x00, 0x00,
217         0x00, 0x00, 0x00, 0x00,
218         0x00, 0x00, 0x00, 0x00,
219
220         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
221         0x00, 0x08, 0x00, 0x00,
222 };
223
224 /* offset info for MAC + IPv4 + UDP dummy packet */
225 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
226         { ICE_MAC_OFOS,         0 },
227         { ICE_ETYPE_OL,         12 },
228         { ICE_IPV4_OFOS,        14 },
229         { ICE_UDP_ILOS,         34 },
230         { ICE_PROTOCOL_LAST,    0 },
231 };
232
233 /* Dummy packet for MAC + IPv4 + UDP */
234 static const u8 dummy_udp_packet[] = {
235         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
236         0x00, 0x00, 0x00, 0x00,
237         0x00, 0x00, 0x00, 0x00,
238
239         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
240
241         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
242         0x00, 0x01, 0x00, 0x00,
243         0x00, 0x11, 0x00, 0x00,
244         0x00, 0x00, 0x00, 0x00,
245         0x00, 0x00, 0x00, 0x00,
246
247         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
248         0x00, 0x08, 0x00, 0x00,
249
250         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
251 };
252
253 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
254 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
255         { ICE_MAC_OFOS,         0 },
256         { ICE_VLAN_OFOS,        12 },
257         { ICE_ETYPE_OL,         16 },
258         { ICE_IPV4_OFOS,        18 },
259         { ICE_UDP_ILOS,         38 },
260         { ICE_PROTOCOL_LAST,    0 },
261 };
262
263 /* C-tag (802.1Q), IPv4:UDP dummy packet */
264 static const u8 dummy_vlan_udp_packet[] = {
265         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266         0x00, 0x00, 0x00, 0x00,
267         0x00, 0x00, 0x00, 0x00,
268
269         0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
270
271         0x08, 0x00,             /* ICE_ETYPE_OL 16 */
272
273         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
274         0x00, 0x01, 0x00, 0x00,
275         0x00, 0x11, 0x00, 0x00,
276         0x00, 0x00, 0x00, 0x00,
277         0x00, 0x00, 0x00, 0x00,
278
279         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
280         0x00, 0x08, 0x00, 0x00,
281
282         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
283 };
284
285 /* offset info for MAC + IPv4 + TCP dummy packet */
286 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
287         { ICE_MAC_OFOS,         0 },
288         { ICE_ETYPE_OL,         12 },
289         { ICE_IPV4_OFOS,        14 },
290         { ICE_TCP_IL,           34 },
291         { ICE_PROTOCOL_LAST,    0 },
292 };
293
294 /* Dummy packet for MAC + IPv4 + TCP */
295 static const u8 dummy_tcp_packet[] = {
296         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
297         0x00, 0x00, 0x00, 0x00,
298         0x00, 0x00, 0x00, 0x00,
299
300         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
301
302         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
303         0x00, 0x01, 0x00, 0x00,
304         0x00, 0x06, 0x00, 0x00,
305         0x00, 0x00, 0x00, 0x00,
306         0x00, 0x00, 0x00, 0x00,
307
308         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
309         0x00, 0x00, 0x00, 0x00,
310         0x00, 0x00, 0x00, 0x00,
311         0x50, 0x00, 0x00, 0x00,
312         0x00, 0x00, 0x00, 0x00,
313
314         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
315 };
316
317 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
318 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
319         { ICE_MAC_OFOS,         0 },
320         { ICE_VLAN_OFOS,        12 },
321         { ICE_ETYPE_OL,         16 },
322         { ICE_IPV4_OFOS,        18 },
323         { ICE_TCP_IL,           38 },
324         { ICE_PROTOCOL_LAST,    0 },
325 };
326
327 /* C-tag (802.1Q), IPv4:TCP dummy packet */
328 static const u8 dummy_vlan_tcp_packet[] = {
329         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
330         0x00, 0x00, 0x00, 0x00,
331         0x00, 0x00, 0x00, 0x00,
332
333         0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
334
335         0x08, 0x00,             /* ICE_ETYPE_OL 16 */
336
337         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
338         0x00, 0x01, 0x00, 0x00,
339         0x00, 0x06, 0x00, 0x00,
340         0x00, 0x00, 0x00, 0x00,
341         0x00, 0x00, 0x00, 0x00,
342
343         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
344         0x00, 0x00, 0x00, 0x00,
345         0x00, 0x00, 0x00, 0x00,
346         0x50, 0x00, 0x00, 0x00,
347         0x00, 0x00, 0x00, 0x00,
348
349         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
350 };
351
352 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
353         { ICE_MAC_OFOS,         0 },
354         { ICE_ETYPE_OL,         12 },
355         { ICE_IPV6_OFOS,        14 },
356         { ICE_TCP_IL,           54 },
357         { ICE_PROTOCOL_LAST,    0 },
358 };
359
360 static const u8 dummy_tcp_ipv6_packet[] = {
361         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
362         0x00, 0x00, 0x00, 0x00,
363         0x00, 0x00, 0x00, 0x00,
364
365         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
366
367         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
368         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
369         0x00, 0x00, 0x00, 0x00,
370         0x00, 0x00, 0x00, 0x00,
371         0x00, 0x00, 0x00, 0x00,
372         0x00, 0x00, 0x00, 0x00,
373         0x00, 0x00, 0x00, 0x00,
374         0x00, 0x00, 0x00, 0x00,
375         0x00, 0x00, 0x00, 0x00,
376         0x00, 0x00, 0x00, 0x00,
377
378         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
379         0x00, 0x00, 0x00, 0x00,
380         0x00, 0x00, 0x00, 0x00,
381         0x50, 0x00, 0x00, 0x00,
382         0x00, 0x00, 0x00, 0x00,
383
384         0x00, 0x00, /* 2 bytes for 4 byte alignment */
385 };
386
387 /* C-tag (802.1Q): IPv6 + TCP */
388 static const struct ice_dummy_pkt_offsets
389 dummy_vlan_tcp_ipv6_packet_offsets[] = {
390         { ICE_MAC_OFOS,         0 },
391         { ICE_VLAN_OFOS,        12 },
392         { ICE_ETYPE_OL,         16 },
393         { ICE_IPV6_OFOS,        18 },
394         { ICE_TCP_IL,           58 },
395         { ICE_PROTOCOL_LAST,    0 },
396 };
397
398 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
399 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
400         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
401         0x00, 0x00, 0x00, 0x00,
402         0x00, 0x00, 0x00, 0x00,
403
404         0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
405
406         0x86, 0xDD,             /* ICE_ETYPE_OL 16 */
407
408         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
409         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
410         0x00, 0x00, 0x00, 0x00,
411         0x00, 0x00, 0x00, 0x00,
412         0x00, 0x00, 0x00, 0x00,
413         0x00, 0x00, 0x00, 0x00,
414         0x00, 0x00, 0x00, 0x00,
415         0x00, 0x00, 0x00, 0x00,
416         0x00, 0x00, 0x00, 0x00,
417         0x00, 0x00, 0x00, 0x00,
418
419         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
420         0x00, 0x00, 0x00, 0x00,
421         0x00, 0x00, 0x00, 0x00,
422         0x50, 0x00, 0x00, 0x00,
423         0x00, 0x00, 0x00, 0x00,
424
425         0x00, 0x00, /* 2 bytes for 4 byte alignment */
426 };
427
428 /* IPv6 + UDP */
429 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
430         { ICE_MAC_OFOS,         0 },
431         { ICE_ETYPE_OL,         12 },
432         { ICE_IPV6_OFOS,        14 },
433         { ICE_UDP_ILOS,         54 },
434         { ICE_PROTOCOL_LAST,    0 },
435 };
436
437 /* IPv6 + UDP dummy packet */
438 static const u8 dummy_udp_ipv6_packet[] = {
439         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
440         0x00, 0x00, 0x00, 0x00,
441         0x00, 0x00, 0x00, 0x00,
442
443         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
444
445         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
446         0x00, 0x10, 0x11, 0x00, /* Next header UDP */
447         0x00, 0x00, 0x00, 0x00,
448         0x00, 0x00, 0x00, 0x00,
449         0x00, 0x00, 0x00, 0x00,
450         0x00, 0x00, 0x00, 0x00,
451         0x00, 0x00, 0x00, 0x00,
452         0x00, 0x00, 0x00, 0x00,
453         0x00, 0x00, 0x00, 0x00,
454         0x00, 0x00, 0x00, 0x00,
455
456         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
457         0x00, 0x10, 0x00, 0x00,
458
459         0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
460         0x00, 0x00, 0x00, 0x00,
461
462         0x00, 0x00, /* 2 bytes for 4 byte alignment */
463 };
464
465 /* C-tag (802.1Q): IPv6 + UDP */
466 static const struct ice_dummy_pkt_offsets
467 dummy_vlan_udp_ipv6_packet_offsets[] = {
468         { ICE_MAC_OFOS,         0 },
469         { ICE_VLAN_OFOS,        12 },
470         { ICE_ETYPE_OL,         16 },
471         { ICE_IPV6_OFOS,        18 },
472         { ICE_UDP_ILOS,         58 },
473         { ICE_PROTOCOL_LAST,    0 },
474 };
475
476 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
477 static const u8 dummy_vlan_udp_ipv6_packet[] = {
478         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
479         0x00, 0x00, 0x00, 0x00,
480         0x00, 0x00, 0x00, 0x00,
481
482         0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
483
484         0x86, 0xDD,             /* ICE_ETYPE_OL 16 */
485
486         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
487         0x00, 0x08, 0x11, 0x00, /* Next header UDP */
488         0x00, 0x00, 0x00, 0x00,
489         0x00, 0x00, 0x00, 0x00,
490         0x00, 0x00, 0x00, 0x00,
491         0x00, 0x00, 0x00, 0x00,
492         0x00, 0x00, 0x00, 0x00,
493         0x00, 0x00, 0x00, 0x00,
494         0x00, 0x00, 0x00, 0x00,
495         0x00, 0x00, 0x00, 0x00,
496
497         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
498         0x00, 0x08, 0x00, 0x00,
499
500         0x00, 0x00, /* 2 bytes for 4 byte alignment */
501 };
502
503 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
504         (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
505          (DUMMY_ETH_HDR_LEN * \
506           sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
507 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
508         (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
509 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
510         (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
511          ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
512 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
513         (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
514          ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
515
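/* Illustrative sketch (assumed usage, not upstream code): the size macros
 * above size the admin queue buffer for a single rule.  A lookup rule that
 * carries the dummy Ethernet header needs ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
 * while a removal rule carries no header and only needs
 * ICE_SW_RULE_RX_TX_NO_HDR_SIZE.  The helper below is hypothetical.
 */
static inline struct ice_aqc_sw_rules_elem *
ice_sketch_alloc_lkup_rule(struct ice_hw *hw, bool remove)
{
        u16 sz = remove ? ICE_SW_RULE_RX_TX_NO_HDR_SIZE :
                          ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;

        /* zeroed so any unused lookup/action fields default to benign values */
        return devm_kzalloc(ice_hw_to_dev(hw), sz, GFP_KERNEL);
}
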
516 /* this is a recipe to profile association bitmap */
517 static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
518                           ICE_MAX_NUM_PROFILES);
519
520 /* this is a profile to recipe association bitmap */
521 static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
522                           ICE_MAX_NUM_RECIPES);
523
524 /**
525  * ice_init_def_sw_recp - initialize the recipe bookkeeping tables
526  * @hw: pointer to the HW struct
527  *
528  * Allocate memory for the entire recipe table and initialize the structures/
529  * entries corresponding to basic recipes.
530  */
531 int ice_init_def_sw_recp(struct ice_hw *hw)
532 {
533         struct ice_sw_recipe *recps;
534         u8 i;
535
536         recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
537                              sizeof(*recps), GFP_KERNEL);
538         if (!recps)
539                 return -ENOMEM;
540
541         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
542                 recps[i].root_rid = i;
543                 INIT_LIST_HEAD(&recps[i].filt_rules);
544                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
545                 INIT_LIST_HEAD(&recps[i].rg_list);
546                 mutex_init(&recps[i].filt_rule_lock);
547         }
548
549         hw->switch_info->recp_list = recps;
550
551         return 0;
552 }
553
554 /**
555  * ice_aq_get_sw_cfg - get switch configuration
556  * @hw: pointer to the hardware structure
557  * @buf: pointer to the result buffer
558  * @buf_size: length of the buffer available for response
559  * @req_desc: pointer to requested descriptor
560  * @num_elems: pointer to number of elements
561  * @cd: pointer to command details structure or NULL
562  *
563  * Get switch configuration (0x0200) to be placed in buf.
564  * This admin command returns information such as initial VSI/port number
565  * and switch ID it belongs to.
566  *
567  * NOTE: *req_desc is both an input/output parameter.
568  * The caller of this function first calls it with *req_desc set
569  * to 0. If the response from f/w has *req_desc set to 0, all the switch
570  * configuration information has been returned; if non-zero (meaning not all
571  * the information was returned), the caller should call this function again
572  * with *req_desc set to the previous value returned by f/w to get the
573  * next block of switch configuration information.
574  *
575  * *num_elems is an output-only parameter reflecting the number of elements
576  * in the response buffer. The caller of this function should use *num_elems
577  * while parsing the response buffer.
578  */
579 static int
580 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
581                   u16 buf_size, u16 *req_desc, u16 *num_elems,
582                   struct ice_sq_cd *cd)
583 {
584         struct ice_aqc_get_sw_cfg *cmd;
585         struct ice_aq_desc desc;
586         int status;
587
588         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
589         cmd = &desc.params.get_sw_conf;
590         cmd->element = cpu_to_le16(*req_desc);
591
592         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
593         if (!status) {
594                 *req_desc = le16_to_cpu(cmd->element);
595                 *num_elems = le16_to_cpu(cmd->num_elems);
596         }
597
598         return status;
599 }
600
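/* Illustrative sketch (not upstream code) of the req_desc continuation
 * protocol described above: keep re-issuing the command, feeding back the
 * descriptor value returned by firmware, until it comes back as zero.
 * ice_get_initial_sw_cfg() later in this file is the real user of this
 * pattern.
 */
static inline int
ice_sketch_walk_sw_cfg(struct ice_hw *hw,
                       struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size)
{
        u16 req_desc = 0, num_elems;
        int status;

        do {
                status = ice_aq_get_sw_cfg(hw, buf, buf_size, &req_desc,
                                           &num_elems, NULL);
                /* a real caller parses num_elems entries of buf here */
        } while (!status && req_desc);

        return status;
}
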
601 /**
602  * ice_aq_add_vsi
603  * @hw: pointer to the HW struct
604  * @vsi_ctx: pointer to a VSI context struct
605  * @cd: pointer to command details structure or NULL
606  *
607  * Add a VSI context to the hardware (0x0210)
608  */
609 static int
610 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
611                struct ice_sq_cd *cd)
612 {
613         struct ice_aqc_add_update_free_vsi_resp *res;
614         struct ice_aqc_add_get_update_free_vsi *cmd;
615         struct ice_aq_desc desc;
616         int status;
617
618         cmd = &desc.params.vsi_cmd;
619         res = &desc.params.add_update_free_vsi_res;
620
621         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
622
623         if (!vsi_ctx->alloc_from_pool)
624                 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
625                                            ICE_AQ_VSI_IS_VALID);
626         cmd->vf_id = vsi_ctx->vf_num;
627
628         cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
629
630         desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
631
632         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
633                                  sizeof(vsi_ctx->info), cd);
634
635         if (!status) {
636                 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
637                 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
638                 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
639         }
640
641         return status;
642 }
643
644 /**
645  * ice_aq_free_vsi
646  * @hw: pointer to the HW struct
647  * @vsi_ctx: pointer to a VSI context struct
648  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
649  * @cd: pointer to command details structure or NULL
650  *
651  * Free VSI context info from hardware (0x0213)
652  */
653 static int
654 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
655                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
656 {
657         struct ice_aqc_add_update_free_vsi_resp *resp;
658         struct ice_aqc_add_get_update_free_vsi *cmd;
659         struct ice_aq_desc desc;
660         int status;
661
662         cmd = &desc.params.vsi_cmd;
663         resp = &desc.params.add_update_free_vsi_res;
664
665         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
666
667         cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
668         if (keep_vsi_alloc)
669                 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
670
671         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
672         if (!status) {
673                 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
674                 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
675         }
676
677         return status;
678 }
679
680 /**
681  * ice_aq_update_vsi
682  * @hw: pointer to the HW struct
683  * @vsi_ctx: pointer to a VSI context struct
684  * @cd: pointer to command details structure or NULL
685  *
686  * Update VSI context in the hardware (0x0211)
687  */
688 static int
689 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
690                   struct ice_sq_cd *cd)
691 {
692         struct ice_aqc_add_update_free_vsi_resp *resp;
693         struct ice_aqc_add_get_update_free_vsi *cmd;
694         struct ice_aq_desc desc;
695         int status;
696
697         cmd = &desc.params.vsi_cmd;
698         resp = &desc.params.add_update_free_vsi_res;
699
700         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
701
702         cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
703
704         desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
705
706         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
707                                  sizeof(vsi_ctx->info), cd);
708
709         if (!status) {
710                 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
711                 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
712         }
713
714         return status;
715 }
716
717 /**
718  * ice_is_vsi_valid - check whether the VSI is valid or not
719  * @hw: pointer to the HW struct
720  * @vsi_handle: VSI handle
721  *
722  * check whether the VSI is valid or not
723  */
724 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
725 {
726         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
727 }
728
729 /**
730  * ice_get_hw_vsi_num - return the HW VSI number
731  * @hw: pointer to the HW struct
732  * @vsi_handle: VSI handle
733  *
734  * return the HW VSI number
735  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
736  */
737 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
738 {
739         return hw->vsi_ctx[vsi_handle]->vsi_num;
740 }
741
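/* Illustrative sketch (not upstream code): ice_get_hw_vsi_num() may only be
 * called for a valid handle, so callers pair it with ice_is_vsi_valid() as
 * shown here.  The helper name and -EINVAL convention are illustrative.
 */
static inline int
ice_sketch_handle_to_hw_vsi(struct ice_hw *hw, u16 vsi_handle, u16 *hw_vsi)
{
        if (!ice_is_vsi_valid(hw, vsi_handle))
                return -EINVAL;

        *hw_vsi = ice_get_hw_vsi_num(hw, vsi_handle);
        return 0;
}
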
742 /**
743  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
744  * @hw: pointer to the HW struct
745  * @vsi_handle: VSI handle
746  *
747  * return the VSI context entry for a given VSI handle
748  */
749 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
750 {
751         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
752 }
753
754 /**
755  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
756  * @hw: pointer to the HW struct
757  * @vsi_handle: VSI handle
758  * @vsi: VSI context pointer
759  *
760  * save the VSI context entry for a given VSI handle
761  */
762 static void
763 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
764 {
765         hw->vsi_ctx[vsi_handle] = vsi;
766 }
767
768 /**
769  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
770  * @hw: pointer to the HW struct
771  * @vsi_handle: VSI handle
772  */
773 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
774 {
775         struct ice_vsi_ctx *vsi;
776         u8 i;
777
778         vsi = ice_get_vsi_ctx(hw, vsi_handle);
779         if (!vsi)
780                 return;
781         ice_for_each_traffic_class(i) {
782                 if (vsi->lan_q_ctx[i]) {
783                         devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
784                         vsi->lan_q_ctx[i] = NULL;
785                 }
786                 if (vsi->rdma_q_ctx[i]) {
787                         devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
788                         vsi->rdma_q_ctx[i] = NULL;
789                 }
790         }
791 }
792
793 /**
794  * ice_clear_vsi_ctx - clear the VSI context entry
795  * @hw: pointer to the HW struct
796  * @vsi_handle: VSI handle
797  *
798  * clear the VSI context entry
799  */
800 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
801 {
802         struct ice_vsi_ctx *vsi;
803
804         vsi = ice_get_vsi_ctx(hw, vsi_handle);
805         if (vsi) {
806                 ice_clear_vsi_q_ctx(hw, vsi_handle);
807                 devm_kfree(ice_hw_to_dev(hw), vsi);
808                 hw->vsi_ctx[vsi_handle] = NULL;
809         }
810 }
811
812 /**
813  * ice_clear_all_vsi_ctx - clear all the VSI context entries
814  * @hw: pointer to the HW struct
815  */
816 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
817 {
818         u16 i;
819
820         for (i = 0; i < ICE_MAX_VSI; i++)
821                 ice_clear_vsi_ctx(hw, i);
822 }
823
824 /**
825  * ice_add_vsi - add VSI context to the hardware and VSI handle list
826  * @hw: pointer to the HW struct
827  * @vsi_handle: unique VSI handle provided by drivers
828  * @vsi_ctx: pointer to a VSI context struct
829  * @cd: pointer to command details structure or NULL
830  *
831  * Add a VSI context to the hardware also add it into the VSI handle list.
832  * If this function gets called after reset for existing VSIs then update
833  * with the new HW VSI number in the corresponding VSI handle list entry.
834  */
835 int
836 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
837             struct ice_sq_cd *cd)
838 {
839         struct ice_vsi_ctx *tmp_vsi_ctx;
840         int status;
841
842         if (vsi_handle >= ICE_MAX_VSI)
843                 return -EINVAL;
844         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
845         if (status)
846                 return status;
847         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
848         if (!tmp_vsi_ctx) {
849                 /* Create a new VSI context */
850                 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
851                                            sizeof(*tmp_vsi_ctx), GFP_KERNEL);
852                 if (!tmp_vsi_ctx) {
853                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
854                         return -ENOMEM;
855                 }
856                 *tmp_vsi_ctx = *vsi_ctx;
857                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
858         } else {
859                 /* update with new HW VSI num */
860                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
861         }
862
863         return 0;
864 }
865
866 /**
867  * ice_free_vsi - free VSI context from hardware and VSI handle list
868  * @hw: pointer to the HW struct
869  * @vsi_handle: unique VSI handle
870  * @vsi_ctx: pointer to a VSI context struct
871  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
872  * @cd: pointer to command details structure or NULL
873  *
874  * Free VSI context info from hardware as well as from VSI handle list
875  */
876 int
877 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
878              bool keep_vsi_alloc, struct ice_sq_cd *cd)
879 {
880         int status;
881
882         if (!ice_is_vsi_valid(hw, vsi_handle))
883                 return -EINVAL;
884         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
885         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
886         if (!status)
887                 ice_clear_vsi_ctx(hw, vsi_handle);
888         return status;
889 }
890
891 /**
892  * ice_update_vsi
893  * @hw: pointer to the HW struct
894  * @vsi_handle: unique VSI handle
895  * @vsi_ctx: pointer to a VSI context struct
896  * @cd: pointer to command details structure or NULL
897  *
898  * Update VSI context in the hardware
899  */
900 int
901 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
902                struct ice_sq_cd *cd)
903 {
904         if (!ice_is_vsi_valid(hw, vsi_handle))
905                 return -EINVAL;
906         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
907         return ice_aq_update_vsi(hw, vsi_ctx, cd);
908 }
909
910 /**
911  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
912  * @hw: pointer to HW struct
913  * @vsi_handle: VSI SW index
914  * @enable: boolean for enable/disable
915  */
916 int
917 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
918 {
919         struct ice_vsi_ctx *ctx;
920
921         ctx = ice_get_vsi_ctx(hw, vsi_handle);
922         if (!ctx)
923                 return -EIO;
924
925         if (enable)
926                 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
927         else
928                 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
929
930         return ice_update_vsi(hw, vsi_handle, ctx, NULL);
931 }
932
933 /**
934  * ice_aq_alloc_free_vsi_list
935  * @hw: pointer to the HW struct
936  * @vsi_list_id: VSI list ID returned or used for lookup
937  * @lkup_type: switch rule filter lookup type
938  * @opc: switch rules population command type - pass in the command opcode
939  *
940  * allocates or frees a VSI list resource
941  */
942 static int
943 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
944                            enum ice_sw_lkup_type lkup_type,
945                            enum ice_adminq_opc opc)
946 {
947         struct ice_aqc_alloc_free_res_elem *sw_buf;
948         struct ice_aqc_res_elem *vsi_ele;
949         u16 buf_len;
950         int status;
951
952         buf_len = struct_size(sw_buf, elem, 1);
953         sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
954         if (!sw_buf)
955                 return -ENOMEM;
956         sw_buf->num_elems = cpu_to_le16(1);
957
958         if (lkup_type == ICE_SW_LKUP_MAC ||
959             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
960             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
961             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
962             lkup_type == ICE_SW_LKUP_PROMISC ||
963             lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
964                 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
965         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
966                 sw_buf->res_type =
967                         cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
968         } else {
969                 status = -EINVAL;
970                 goto ice_aq_alloc_free_vsi_list_exit;
971         }
972
973         if (opc == ice_aqc_opc_free_res)
974                 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
975
976         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
977         if (status)
978                 goto ice_aq_alloc_free_vsi_list_exit;
979
980         if (opc == ice_aqc_opc_alloc_res) {
981                 vsi_ele = &sw_buf->elem[0];
982                 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
983         }
984
985 ice_aq_alloc_free_vsi_list_exit:
986         devm_kfree(ice_hw_to_dev(hw), sw_buf);
987         return status;
988 }
989
990 /**
991  * ice_aq_sw_rules - add/update/remove switch rules
992  * @hw: pointer to the HW struct
993  * @rule_list: pointer to switch rule population list
994  * @rule_list_sz: total size of the rule list in bytes
995  * @num_rules: number of switch rules in the rule_list
996  * @opc: switch rules population command type - pass in the command opcode
997  * @cd: pointer to command details structure or NULL
998  *
999  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1000  */
1001 int
1002 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1003                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1004 {
1005         struct ice_aq_desc desc;
1006         int status;
1007
1008         if (opc != ice_aqc_opc_add_sw_rules &&
1009             opc != ice_aqc_opc_update_sw_rules &&
1010             opc != ice_aqc_opc_remove_sw_rules)
1011                 return -EINVAL;
1012
1013         ice_fill_dflt_direct_cmd_desc(&desc, opc);
1014
1015         desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1016         desc.params.sw_rules.num_rules_fltr_entry_index =
1017                 cpu_to_le16(num_rules);
1018         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1019         if (opc != ice_aqc_opc_add_sw_rules &&
1020             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1021                 status = -ENOENT;
1022
1023         return status;
1024 }
1025
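/* Illustrative sketch (not upstream code): removing a previously added lookup
 * rule only needs the rule ID that was stored when it was created, so the
 * buffer is the header-less size and the opcode is
 * ice_aqc_opc_remove_sw_rules.  The helper below is hypothetical; see also
 * how ice_fill_sw_rule() handles the remove opcode later in this file.
 */
static inline int ice_sketch_remove_rule(struct ice_hw *hw, u16 rule_id)
{
        struct ice_aqc_sw_rules_elem *s_rule;
        int status;

        s_rule = devm_kzalloc(ice_hw_to_dev(hw),
                              ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
        if (!s_rule)
                return -ENOMEM;

        s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
        s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);

        status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
                                 ice_aqc_opc_remove_sw_rules, NULL);
        devm_kfree(ice_hw_to_dev(hw), s_rule);
        return status;
}
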
1026 /**
1027  * ice_aq_add_recipe - add switch recipe
1028  * @hw: pointer to the HW struct
1029  * @s_recipe_list: pointer to switch rule population list
1030  * @num_recipes: number of switch recipes in the list
1031  * @cd: pointer to command details structure or NULL
1032  *
1033  * Add(0x0290)
1034  */
1035 static int
1036 ice_aq_add_recipe(struct ice_hw *hw,
1037                   struct ice_aqc_recipe_data_elem *s_recipe_list,
1038                   u16 num_recipes, struct ice_sq_cd *cd)
1039 {
1040         struct ice_aqc_add_get_recipe *cmd;
1041         struct ice_aq_desc desc;
1042         u16 buf_size;
1043
1044         cmd = &desc.params.add_get_recipe;
1045         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1046
1047         cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1048         desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1049
1050         buf_size = num_recipes * sizeof(*s_recipe_list);
1051
1052         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1053 }
1054
1055 /**
1056  * ice_aq_get_recipe - get switch recipe
1057  * @hw: pointer to the HW struct
1058  * @s_recipe_list: pointer to switch rule population list
1059  * @num_recipes: pointer to the number of recipes (input and output)
1060  * @recipe_root: root recipe number of recipe(s) to retrieve
1061  * @cd: pointer to command details structure or NULL
1062  *
1063  * Get(0x0292)
1064  *
1065  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1066  * On output, *num_recipes will equal the number of entries returned in
1067  * s_recipe_list.
1068  *
1069  * The caller must supply enough space in s_recipe_list to hold all possible
1070  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1071  */
1072 static int
1073 ice_aq_get_recipe(struct ice_hw *hw,
1074                   struct ice_aqc_recipe_data_elem *s_recipe_list,
1075                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1076 {
1077         struct ice_aqc_add_get_recipe *cmd;
1078         struct ice_aq_desc desc;
1079         u16 buf_size;
1080         int status;
1081
1082         if (*num_recipes != ICE_MAX_NUM_RECIPES)
1083                 return -EINVAL;
1084
1085         cmd = &desc.params.add_get_recipe;
1086         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1087
1088         cmd->return_index = cpu_to_le16(recipe_root);
1089         cmd->num_sub_recipes = 0;
1090
1091         buf_size = *num_recipes * sizeof(*s_recipe_list);
1092
1093         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1094         *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1095
1096         return status;
1097 }
1098
1099 /**
1100  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1101  * @hw: pointer to the HW struct
1102  * @profile_id: package profile ID to associate the recipe with
1103  * @r_bitmap: recipe bitmap to associate with the profile
1104  * @cd: pointer to command details structure or NULL
1105  * Recipe to profile association (0x0291)
1106  */
1107 static int
1108 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1109                              struct ice_sq_cd *cd)
1110 {
1111         struct ice_aqc_recipe_to_profile *cmd;
1112         struct ice_aq_desc desc;
1113
1114         cmd = &desc.params.recipe_to_profile;
1115         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1116         cmd->profile_id = cpu_to_le16(profile_id);
1117         /* Set the recipe ID bit in the bitmask to let the device know which
1118          * profile we are associating the recipe to
1119          */
1120         memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
1121
1122         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1123 }
1124
1125 /**
1126  * ice_aq_get_recipe_to_profile - Get the recipes associated with a profile
1127  * @hw: pointer to the HW struct
1128  * @profile_id: package profile ID to read the association for
1129  * @r_bitmap: recipe bitmap filled in from the firmware response
1130  * @cd: pointer to command details structure or NULL
1131  * Get the recipe-to-profile association (0x0293)
1132  */
1133 static int
1134 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1135                              struct ice_sq_cd *cd)
1136 {
1137         struct ice_aqc_recipe_to_profile *cmd;
1138         struct ice_aq_desc desc;
1139         int status;
1140
1141         cmd = &desc.params.recipe_to_profile;
1142         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1143         cmd->profile_id = cpu_to_le16(profile_id);
1144
1145         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1146         if (!status)
1147                 memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
1148
1149         return status;
1150 }
1151
1152 /**
1153  * ice_alloc_recipe - add recipe resource
1154  * @hw: pointer to the hardware structure
1155  * @rid: recipe ID returned as response to AQ call
1156  */
1157 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1158 {
1159         struct ice_aqc_alloc_free_res_elem *sw_buf;
1160         u16 buf_len;
1161         int status;
1162
1163         buf_len = struct_size(sw_buf, elem, 1);
1164         sw_buf = kzalloc(buf_len, GFP_KERNEL);
1165         if (!sw_buf)
1166                 return -ENOMEM;
1167
1168         sw_buf->num_elems = cpu_to_le16(1);
1169         sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
1170                                         ICE_AQC_RES_TYPE_S) |
1171                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
1172         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1173                                        ice_aqc_opc_alloc_res, NULL);
1174         if (!status)
1175                 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
1176         kfree(sw_buf);
1177
1178         return status;
1179 }
1180
1181 /**
1182  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1183  * @hw: pointer to hardware structure
1184  *
1185  * This function is used to populate the recipe_to_profile matrix, where the
1186  * index into this array is the recipe ID and the element is the bitmap of
1187  * profiles this recipe is mapped to.
1188  */
1189 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1190 {
1191         DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
1192         u16 i;
1193
1194         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1195                 u16 j;
1196
1197                 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1198                 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
1199                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1200                         continue;
1201                 bitmap_copy(profile_to_recipe[i], r_bitmap,
1202                             ICE_MAX_NUM_RECIPES);
1203                 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1204                         set_bit(i, recipe_to_profile[j]);
1205         }
1206 }
1207
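/* Illustrative sketch (not upstream code): once ice_get_recp_to_prof_map()
 * has refreshed the static bitmaps above, answering "is recipe rid associated
 * with profile prof?" is a plain bitmap test.  The helper name is
 * illustrative.
 */
static inline bool ice_sketch_recp_uses_prof(u16 rid, u16 prof)
{
        return rid < ICE_MAX_NUM_RECIPES && prof < ICE_MAX_NUM_PROFILES &&
               test_bit(prof, recipe_to_profile[rid]);
}
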
1208 /**
1209  * ice_collect_result_idx - copy result index values
1210  * @buf: buffer that contains the result index
1211  * @recp: the recipe struct to copy data into
1212  */
1213 static void
1214 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1215                        struct ice_sw_recipe *recp)
1216 {
1217         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1218                 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1219                         recp->res_idxs);
1220 }
1221
1222 /**
1223  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1224  * @hw: pointer to hardware structure
1225  * @recps: struct that we need to populate
1226  * @rid: recipe ID that we are populating
1227  * @refresh_required: true if we should get recipe to profile mapping from FW
1228  *
1229  * This function is used to populate all the necessary entries into our
1230  * bookkeeping so that we have a current list of all the recipes that are
1231  * programmed in the firmware.
1232  */
1233 static int
1234 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1235                     bool *refresh_required)
1236 {
1237         DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
1238         struct ice_aqc_recipe_data_elem *tmp;
1239         u16 num_recps = ICE_MAX_NUM_RECIPES;
1240         struct ice_prot_lkup_ext *lkup_exts;
1241         u8 fv_word_idx = 0;
1242         u16 sub_recps;
1243         int status;
1244
1245         bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
1246
1247         /* we need a buffer big enough to accommodate all the recipes */
1248         tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
1249         if (!tmp)
1250                 return -ENOMEM;
1251
1252         tmp[0].recipe_indx = rid;
1253         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1254         /* non-zero status meaning recipe doesn't exist */
1255         if (status)
1256                 goto err_unroll;
1257
1258         /* Get recipe to profile map so that we can get the fv from lkups that
1259          * we read for a recipe from FW. Since we want to minimize the number of
1260          * times we make this FW call, just make one call and cache the copy
1261          * until a new recipe is added. This operation is only required the
1262          * first time to get the changes from FW. Then to search existing
1263          * entries we don't need to update the cache again until another recipe
1264          * gets added.
1265          */
1266         if (*refresh_required) {
1267                 ice_get_recp_to_prof_map(hw);
1268                 *refresh_required = false;
1269         }
1270
1271         /* Start populating all the entries for recps[rid] based on lkups from
1272          * firmware. Note that we are only creating the root recipe in our
1273          * database.
1274          */
1275         lkup_exts = &recps[rid].lkup_exts;
1276
1277         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1278                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1279                 struct ice_recp_grp_entry *rg_entry;
1280                 u8 i, prof, idx, prot = 0;
1281                 bool is_root;
1282                 u16 off = 0;
1283
1284                 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
1285                                         GFP_KERNEL);
1286                 if (!rg_entry) {
1287                         status = -ENOMEM;
1288                         goto err_unroll;
1289                 }
1290
1291                 idx = root_bufs.recipe_indx;
1292                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1293
1294                 /* Mark all result indices in this chain */
1295                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1296                         set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1297                                 result_bm);
1298
1299                 /* get the first profile that is associated with rid */
1300                 prof = find_first_bit(recipe_to_profile[idx],
1301                                       ICE_MAX_NUM_PROFILES);
1302                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1303                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1304
1305                         rg_entry->fv_idx[i] = lkup_indx;
1306                         rg_entry->fv_mask[i] =
1307                                 le16_to_cpu(root_bufs.content.mask[i + 1]);
1308
1309                         /* If the recipe is a chained recipe then all its
1310                          * child recipes' results will have a result index.
1311                          * To fill fv_words we should not use those result
1312                          * indices; we only need the protocol IDs and offsets.
1313                          * We will skip every fv_idx that stores a result
1314                          * index. We also need to skip any fv_idx which
1315                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1316                          * valid offset value.
1317                          */
1318                         if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
1319                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1320                             rg_entry->fv_idx[i] == 0)
1321                                 continue;
1322
1323                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
1324                                           rg_entry->fv_idx[i], &prot, &off);
1325                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1326                         lkup_exts->fv_words[fv_word_idx].off = off;
1327                         lkup_exts->field_mask[fv_word_idx] =
1328                                 rg_entry->fv_mask[i];
1329                         fv_word_idx++;
1330                 }
1331                 /* populate rg_list with the data from the child entry of this
1332                  * recipe
1333                  */
1334                 list_add(&rg_entry->l_entry, &recps[rid].rg_list);
1335
1336                 /* Propagate some data to the recipe database */
1337                 recps[idx].is_root = !!is_root;
1338                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1339                 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1340                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1341                         recps[idx].chain_idx = root_bufs.content.result_indx &
1342                                 ~ICE_AQ_RECIPE_RESULT_EN;
1343                         set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1344                 } else {
1345                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1346                 }
1347
1348                 if (!is_root)
1349                         continue;
1350
1351                 /* Only do the following for root recipes entries */
1352                 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1353                        sizeof(recps[idx].r_bitmap));
1354                 recps[idx].root_rid = root_bufs.content.rid &
1355                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
1356                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1357         }
1358
1359         /* Complete initialization of the root recipe entry */
1360         lkup_exts->n_val_words = fv_word_idx;
1361         recps[rid].big_recp = (num_recps > 1);
1362         recps[rid].n_grp_count = (u8)num_recps;
1363         recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
1364                                            recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
1365                                            GFP_KERNEL);
1366         if (!recps[rid].root_buf) {
1367                 status = -ENOMEM;
1368                 goto err_unroll;
1369         }
1370
1371         /* Copy result indexes */
1372         bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1373         recps[rid].recp_created = true;
1374
1375 err_unroll:
1376         kfree(tmp);
1377         return status;
1378 }
1379
1380 /* ice_init_port_info - Initialize port_info with switch configuration data
1381  * @pi: pointer to port_info
1382  * @vsi_port_num: VSI number or port number
1383  * @type: Type of switch element (port or VSI)
1384  * @swid: switch ID of the switch the element is attached to
1385  * @pf_vf_num: PF or VF number
1386  * @is_vf: true if the element is a VF, false otherwise
1387  */
1388 static void
1389 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1390                    u16 swid, u16 pf_vf_num, bool is_vf)
1391 {
1392         switch (type) {
1393         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1394                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1395                 pi->sw_id = swid;
1396                 pi->pf_vf_num = pf_vf_num;
1397                 pi->is_vf = is_vf;
1398                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1399                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1400                 break;
1401         default:
1402                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
1403                 break;
1404         }
1405 }
1406
1407 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1408  * @hw: pointer to the hardware structure
1409  */
1410 int ice_get_initial_sw_cfg(struct ice_hw *hw)
1411 {
1412         struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
1413         u16 req_desc = 0;
1414         u16 num_elems;
1415         int status;
1416         u16 i;
1417
1418         rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
1419                             GFP_KERNEL);
1420
1421         if (!rbuf)
1422                 return -ENOMEM;
1423
1424         /* Multiple calls to ice_aq_get_sw_cfg may be required
1425          * to get all the switch configuration information. The need
1426          * for additional calls is indicated by ice_aq_get_sw_cfg
1427          * writing a non-zero value in req_desc
1428          */
1429         do {
1430                 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1431
1432                 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1433                                            &req_desc, &num_elems, NULL);
1434
1435                 if (status)
1436                         break;
1437
1438                 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
1439                         u16 pf_vf_num, swid, vsi_port_num;
1440                         bool is_vf = false;
1441                         u8 res_type;
1442
1443                         vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
1444                                 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1445
1446                         pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
1447                                 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1448
1449                         swid = le16_to_cpu(ele->swid);
1450
1451                         if (le16_to_cpu(ele->pf_vf_num) &
1452                             ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1453                                 is_vf = true;
1454
1455                         res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
1456                                         ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1457
1458                         if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
1459                                 /* FW VSI is not needed. Just continue. */
1460                                 continue;
1461                         }
1462
1463                         ice_init_port_info(hw->port_info, vsi_port_num,
1464                                            res_type, swid, pf_vf_num, is_vf);
1465                 }
1466         } while (req_desc && !status);
1467
1468         devm_kfree(ice_hw_to_dev(hw), rbuf);
1469         return status;
1470 }
1471
1472 /**
1473  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1474  * @hw: pointer to the hardware structure
1475  * @fi: filter info structure to fill/update
1476  *
1477  * This helper function populates the lb_en and lan_en elements of the provided
1478  * ice_fltr_info struct using the switch's type and characteristics of the
1479  * switch rule being configured.
1480  */
1481 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1482 {
1483         fi->lb_en = false;
1484         fi->lan_en = false;
1485         if ((fi->flag & ICE_FLTR_TX) &&
1486             (fi->fltr_act == ICE_FWD_TO_VSI ||
1487              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1488              fi->fltr_act == ICE_FWD_TO_Q ||
1489              fi->fltr_act == ICE_FWD_TO_QGRP)) {
1490                 /* Setting LB for prune actions would replicate packets to
1491                  * the internal switch, where they would be dropped.
1492                  */
1493                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1494                         fi->lb_en = true;
1495
1496                 /* Set lan_en to TRUE if
1497                  * 1. The switch is a VEB AND
1498                  * 2. Any one of the following is true:
1499                  * 2.1 The lookup is a directional lookup like ethertype,
1500                  * promiscuous, ethertype-MAC, promiscuous-VLAN
1501                  * and default-port OR
1502                  * 2.2 The lookup is VLAN, OR
1503                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1504                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1505                  *
1506                  * OR
1507                  *
1508                  * The switch is a VEPA.
1509                  *
1510                  * In all other cases, the LAN enable has to be set to false.
1511                  */
1512                 if (hw->evb_veb) {
1513                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1514                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1515                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1516                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1517                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
1518                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
1519                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
1520                              !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
1521                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1522                              !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
1523                                 fi->lan_en = true;
1524                 } else {
1525                         fi->lan_en = true;
1526                 }
1527         }
1528 }
1529
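/* Illustrative sketch, not part of the driver: shows the lb_en/lan_en outcome
 * of ice_fill_sw_info() for a Tx unicast MAC forward rule. With the switch in
 * VEB mode (hw->evb_veb set), a unicast ICE_SW_LKUP_MAC lookup ends up with
 * lb_en = true and lan_en = false; a multicast or broadcast address would set
 * lan_en = true as well. The helper name and MAC address below are made up.
 */
static void ice_example_sw_info_outcome(struct ice_hw *hw)
{
	static const u8 ucast_mac[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
	};
	struct ice_fltr_info fi = {};

	fi.flag = ICE_FLTR_TX;
	fi.fltr_act = ICE_FWD_TO_VSI;
	fi.lkup_type = ICE_SW_LKUP_MAC;
	ether_addr_copy(fi.l_data.mac.mac_addr, ucast_mac);

	ice_fill_sw_info(hw, &fi);
	/* in VEB mode: fi.lb_en == true, fi.lan_en == false at this point */
}
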
1530 /**
1531  * ice_fill_sw_rule - Helper function to fill switch rule structure
1532  * @hw: pointer to the hardware structure
1533  * @f_info: entry containing packet forwarding information
1534  * @s_rule: switch rule structure to be filled in based on f_info
1535  * @opc: switch rules population command type - pass in the command opcode
1536  */
1537 static void
1538 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1539                  struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1540 {
1541         u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1542         void *daddr = NULL;
1543         u16 eth_hdr_sz;
1544         u8 *eth_hdr;
1545         u32 act = 0;
1546         __be16 *off;
1547         u8 q_rgn;
1548
1549         if (opc == ice_aqc_opc_remove_sw_rules) {
1550                 s_rule->pdata.lkup_tx_rx.act = 0;
1551                 s_rule->pdata.lkup_tx_rx.index =
1552                         cpu_to_le16(f_info->fltr_rule_id);
1553                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1554                 return;
1555         }
1556
1557         eth_hdr_sz = sizeof(dummy_eth_header);
1558         eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1559
1560         /* initialize the ether header with a dummy header */
1561         memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
1562         ice_fill_sw_info(hw, f_info);
1563
1564         switch (f_info->fltr_act) {
1565         case ICE_FWD_TO_VSI:
1566                 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1567                         ICE_SINGLE_ACT_VSI_ID_M;
1568                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1569                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1570                                 ICE_SINGLE_ACT_VALID_BIT;
1571                 break;
1572         case ICE_FWD_TO_VSI_LIST:
1573                 act |= ICE_SINGLE_ACT_VSI_LIST;
1574                 act |= (f_info->fwd_id.vsi_list_id <<
1575                         ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1576                         ICE_SINGLE_ACT_VSI_LIST_ID_M;
1577                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1578                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1579                                 ICE_SINGLE_ACT_VALID_BIT;
1580                 break;
1581         case ICE_FWD_TO_Q:
1582                 act |= ICE_SINGLE_ACT_TO_Q;
1583                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1584                         ICE_SINGLE_ACT_Q_INDEX_M;
1585                 break;
1586         case ICE_DROP_PACKET:
1587                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1588                         ICE_SINGLE_ACT_VALID_BIT;
1589                 break;
1590         case ICE_FWD_TO_QGRP:
1591                 q_rgn = f_info->qgrp_size > 0 ?
1592                         (u8)ilog2(f_info->qgrp_size) : 0;
1593                 act |= ICE_SINGLE_ACT_TO_Q;
1594                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1595                         ICE_SINGLE_ACT_Q_INDEX_M;
1596                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1597                         ICE_SINGLE_ACT_Q_REGION_M;
1598                 break;
1599         default:
1600                 return;
1601         }
1602
1603         if (f_info->lb_en)
1604                 act |= ICE_SINGLE_ACT_LB_ENABLE;
1605         if (f_info->lan_en)
1606                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1607
1608         switch (f_info->lkup_type) {
1609         case ICE_SW_LKUP_MAC:
1610                 daddr = f_info->l_data.mac.mac_addr;
1611                 break;
1612         case ICE_SW_LKUP_VLAN:
1613                 vlan_id = f_info->l_data.vlan.vlan_id;
1614                 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1615                     f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1616                         act |= ICE_SINGLE_ACT_PRUNE;
1617                         act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1618                 }
1619                 break;
1620         case ICE_SW_LKUP_ETHERTYPE_MAC:
1621                 daddr = f_info->l_data.ethertype_mac.mac_addr;
1622                 fallthrough;
1623         case ICE_SW_LKUP_ETHERTYPE:
1624                 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1625                 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
1626                 break;
1627         case ICE_SW_LKUP_MAC_VLAN:
1628                 daddr = f_info->l_data.mac_vlan.mac_addr;
1629                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1630                 break;
1631         case ICE_SW_LKUP_PROMISC_VLAN:
1632                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1633                 fallthrough;
1634         case ICE_SW_LKUP_PROMISC:
1635                 daddr = f_info->l_data.mac_vlan.mac_addr;
1636                 break;
1637         default:
1638                 break;
1639         }
1640
1641         s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1642                 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1643                 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
1644
1645         /* Recipe set depending on lookup type */
1646         s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
1647         s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
1648         s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
1649
1650         if (daddr)
1651                 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
1652
1653         if (vlan_id <= ICE_MAX_VLAN_ID) {
1654                 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1655                 *off = cpu_to_be16(vlan_id);
1656         }
1657
1658         /* Create the switch rule with the final dummy Ethernet header */
1659         if (opc != ice_aqc_opc_update_sw_rules)
1660                 s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
1661 }
1662
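/* Illustrative sketch, not part of the driver: the 32-bit action word that
 * ice_fill_sw_rule() builds for an ICE_FWD_TO_Q filter is simply the "to
 * queue" opcode with the queue index shifted into its field. The helper
 * below is hypothetical and mirrors only that one branch of the switch.
 */
static u32 ice_example_fwd_to_q_act(u16 q_id)
{
	u32 act = ICE_SINGLE_ACT_TO_Q;

	act |= (q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M;

	return act;
}
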
1663 /**
1664  * ice_add_marker_act - create a large action for a software marker
1665  * @hw: pointer to the hardware structure
1666  * @m_ent: the management entry for which sw marker needs to be added
1667  * @sw_marker: sw marker to tag the Rx descriptor with
1668  * @l_id: large action resource ID
1669  *
1670  * Create a large action to hold software marker and update the switch rule
1671  * entry pointed by m_ent with newly created large action
1672  */
1673 static int
1674 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1675                    u16 sw_marker, u16 l_id)
1676 {
1677         struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1678         /* For software marker we need 3 large actions
1679          * 1. FWD action: FWD TO VSI or VSI LIST
1680          * 2. GENERIC VALUE action to hold the profile ID
1681          * 3. GENERIC VALUE action to hold the software marker ID
1682          */
1683         const u16 num_lg_acts = 3;
1684         u16 lg_act_size;
1685         u16 rules_size;
1686         int status;
1687         u32 act;
1688         u16 id;
1689
1690         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1691                 return -EINVAL;
1692
1693         /* Create two back-to-back switch rules and submit them to the HW using
1694          * one memory buffer:
1695          *    1. Large Action
1696          *    2. Look up Tx Rx
1697          */
1698         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1699         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1700         lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
1701         if (!lg_act)
1702                 return -ENOMEM;
1703
1704         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1705
1706         /* Fill in the first switch rule i.e. large action */
1707         lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
1708         lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
1709         lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
1710
1711         /* First action VSI forwarding or VSI list forwarding depending on how
1712          * many VSIs
1713          */
1714         id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1715                 m_ent->fltr_info.fwd_id.hw_vsi_id;
1716
1717         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1718         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
1719         if (m_ent->vsi_count > 1)
1720                 act |= ICE_LG_ACT_VSI_LIST;
1721         lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
1722
1723         /* Second action descriptor type */
1724         act = ICE_LG_ACT_GENERIC;
1725
1726         act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1727         lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
1728
1729         act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1730                ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1731
1732         /* Third action Marker value */
1733         act |= ICE_LG_ACT_GENERIC;
1734         act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1735                 ICE_LG_ACT_GENERIC_VALUE_M;
1736
1737         lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
1738
1739         /* call the fill switch rule to fill the lookup Tx Rx structure */
1740         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1741                          ice_aqc_opc_update_sw_rules);
1742
1743         /* Update the action to point to the large action ID */
1744         rx_tx->pdata.lkup_tx_rx.act =
1745                 cpu_to_le32(ICE_SINGLE_ACT_PTR |
1746                             ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1747                              ICE_SINGLE_ACT_PTR_VAL_M));
1748
1749         /* Use the filter rule ID of the previously created rule with a
1750          * single action. Once the update happens, hardware will treat this
1751          * as a large action.
1752          */
1753         rx_tx->pdata.lkup_tx_rx.index =
1754                 cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
1755
1756         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1757                                  ice_aqc_opc_update_sw_rules, NULL);
1758         if (!status) {
1759                 m_ent->lg_act_idx = l_id;
1760                 m_ent->sw_marker_id = sw_marker;
1761         }
1762
1763         devm_kfree(ice_hw_to_dev(hw), lg_act);
1764         return status;
1765 }
1766
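/* Illustrative sketch, not part of the driver: ice_add_marker_act() submits a
 * single buffer holding a 3-action large-action rule immediately followed by
 * a lookup Tx/Rx rule; the combined size follows from the same macros it
 * uses. The helper name is hypothetical.
 */
static u16 ice_example_marker_rules_size(void)
{
	return (u16)ICE_SW_RULE_LG_ACT_SIZE(3) + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
}
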
1767 /**
1768  * ice_create_vsi_list_map - create a VSI list ID to VSI mapping entry
1769  * @hw: pointer to the hardware structure
1770  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1771  * @num_vsi: number of VSI handles in the array
1772  * @vsi_list_id: VSI list ID generated as part of allocate resource
1773  *
1774  * Helper function to create a new entry of VSI list ID to VSI mapping
1775  * using the given VSI list ID
1776  */
1777 static struct ice_vsi_list_map_info *
1778 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1779                         u16 vsi_list_id)
1780 {
1781         struct ice_switch_info *sw = hw->switch_info;
1782         struct ice_vsi_list_map_info *v_map;
1783         int i;
1784
1785         v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
1786         if (!v_map)
1787                 return NULL;
1788
1789         v_map->vsi_list_id = vsi_list_id;
1790         v_map->ref_cnt = 1;
1791         for (i = 0; i < num_vsi; i++)
1792                 set_bit(vsi_handle_arr[i], v_map->vsi_map);
1793
1794         list_add(&v_map->list_entry, &sw->vsi_list_map_head);
1795         return v_map;
1796 }
1797
1798 /**
1799  * ice_update_vsi_list_rule - add or update a VSI list switch rule
1800  * @hw: pointer to the hardware structure
1801  * @vsi_handle_arr: array of VSI handles to form a VSI list
1802  * @num_vsi: number of VSI handles in the array
1803  * @vsi_list_id: VSI list ID generated as part of allocate resource
1804  * @remove: Boolean value to indicate if this is a remove action
1805  * @opc: switch rules population command type - pass in the command opcode
1806  * @lkup_type: lookup type of the filter
1807  *
1808  * Call AQ command to add a new switch rule or update existing switch rule
1809  * using the given VSI list ID
1810  */
1811 static int
1812 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1813                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1814                          enum ice_sw_lkup_type lkup_type)
1815 {
1816         struct ice_aqc_sw_rules_elem *s_rule;
1817         u16 s_rule_size;
1818         u16 rule_type;
1819         int status;
1820         int i;
1821
1822         if (!num_vsi)
1823                 return -EINVAL;
1824
1825         if (lkup_type == ICE_SW_LKUP_MAC ||
1826             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1827             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1828             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1829             lkup_type == ICE_SW_LKUP_PROMISC ||
1830             lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
1831                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1832                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1833         else if (lkup_type == ICE_SW_LKUP_VLAN)
1834                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1835                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1836         else
1837                 return -EINVAL;
1838
1839         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1840         s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1841         if (!s_rule)
1842                 return -ENOMEM;
1843         for (i = 0; i < num_vsi; i++) {
1844                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1845                         status = -EINVAL;
1846                         goto exit;
1847                 }
1848                 /* AQ call requires hw_vsi_id(s) */
1849                 s_rule->pdata.vsi_list.vsi[i] =
1850                         cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1851         }
1852
1853         s_rule->type = cpu_to_le16(rule_type);
1854         s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
1855         s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1856
1857         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1858
1859 exit:
1860         devm_kfree(ice_hw_to_dev(hw), s_rule);
1861         return status;
1862 }
1863
1864 /**
1865  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1866  * @hw: pointer to the HW struct
1867  * @vsi_handle_arr: array of VSI handles to form a VSI list
1868  * @num_vsi: number of VSI handles in the array
1869  * @vsi_list_id: stores the ID of the VSI list to be created
1870  * @lkup_type: switch rule filter's lookup type
1871  */
1872 static int
1873 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1874                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1875 {
1876         int status;
1877
1878         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1879                                             ice_aqc_opc_alloc_res);
1880         if (status)
1881                 return status;
1882
1883         /* Update the newly created VSI list to include the specified VSIs */
1884         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1885                                         *vsi_list_id, false,
1886                                         ice_aqc_opc_add_sw_rules, lkup_type);
1887 }
1888
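/* Illustrative sketch, not part of the driver: creating a VSI list that spans
 * two VSI handles for a MAC recipe. Both handles are assumed to be valid; the
 * wrapper name and the handle values are made up.
 */
static int ice_example_make_vsi_list(struct ice_hw *hw, u16 *list_id)
{
	u16 vsi_handle_arr[] = { 0, 1 };	/* hypothetical VSI handles */

	return ice_create_vsi_list_rule(hw, vsi_handle_arr,
					ARRAY_SIZE(vsi_handle_arr), list_id,
					ICE_SW_LKUP_MAC);
}
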
1889 /**
1890  * ice_create_pkt_fwd_rule - create a packet forwarding switch rule
1891  * @hw: pointer to the hardware structure
1892  * @f_entry: entry containing packet forwarding information
1893  *
1894  * Create switch rule with given filter information and add an entry
1895  * to the corresponding filter management list to track this switch rule
1896  * and VSI mapping
1897  */
1898 static int
1899 ice_create_pkt_fwd_rule(struct ice_hw *hw,
1900                         struct ice_fltr_list_entry *f_entry)
1901 {
1902         struct ice_fltr_mgmt_list_entry *fm_entry;
1903         struct ice_aqc_sw_rules_elem *s_rule;
1904         enum ice_sw_lkup_type l_type;
1905         struct ice_sw_recipe *recp;
1906         int status;
1907
1908         s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1909                               ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1910         if (!s_rule)
1911                 return -ENOMEM;
1912         fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
1913                                 GFP_KERNEL);
1914         if (!fm_entry) {
1915                 status = -ENOMEM;
1916                 goto ice_create_pkt_fwd_rule_exit;
1917         }
1918
1919         fm_entry->fltr_info = f_entry->fltr_info;
1920
1921         /* Initialize all the fields for the management entry */
1922         fm_entry->vsi_count = 1;
1923         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1924         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1925         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1926
1927         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1928                          ice_aqc_opc_add_sw_rules);
1929
1930         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1931                                  ice_aqc_opc_add_sw_rules, NULL);
1932         if (status) {
1933                 devm_kfree(ice_hw_to_dev(hw), fm_entry);
1934                 goto ice_create_pkt_fwd_rule_exit;
1935         }
1936
1937         f_entry->fltr_info.fltr_rule_id =
1938                 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1939         fm_entry->fltr_info.fltr_rule_id =
1940                 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1941
1942         /* The bookkeeping entries will get removed when the base driver
1943          * calls the remove filter AQ command.
1944          */
1945         l_type = fm_entry->fltr_info.lkup_type;
1946         recp = &hw->switch_info->recp_list[l_type];
1947         list_add(&fm_entry->list_entry, &recp->filt_rules);
1948
1949 ice_create_pkt_fwd_rule_exit:
1950         devm_kfree(ice_hw_to_dev(hw), s_rule);
1951         return status;
1952 }
1953
1954 /**
1955  * ice_update_pkt_fwd_rule - update a packet forwarding switch rule
1956  * @hw: pointer to the hardware structure
1957  * @f_info: filter information for switch rule
1958  *
1959  * Call AQ command to update a previously created switch rule with a
1960  * VSI list ID
1961  */
1962 static int
1963 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1964 {
1965         struct ice_aqc_sw_rules_elem *s_rule;
1966         int status;
1967
1968         s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1969                               ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1970         if (!s_rule)
1971                 return -ENOMEM;
1972
1973         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1974
1975         s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
1976
1977         /* Update switch rule with new rule set to forward VSI list */
1978         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1979                                  ice_aqc_opc_update_sw_rules, NULL);
1980
1981         devm_kfree(ice_hw_to_dev(hw), s_rule);
1982         return status;
1983 }
1984
1985 /**
1986  * ice_update_sw_rule_bridge_mode - update unicast rules for bridge mode
1987  * @hw: pointer to the HW struct
1988  *
1989  * Updates unicast switch filter rules based on VEB/VEPA mode
1990  */
1991 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1992 {
1993         struct ice_switch_info *sw = hw->switch_info;
1994         struct ice_fltr_mgmt_list_entry *fm_entry;
1995         struct list_head *rule_head;
1996         struct mutex *rule_lock; /* Lock to protect filter rule list */
1997         int status = 0;
1998
1999         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2000         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2001
2002         mutex_lock(rule_lock);
2003         list_for_each_entry(fm_entry, rule_head, list_entry) {
2004                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2005                 u8 *addr = fi->l_data.mac.mac_addr;
2006
2007                 /* Update unicast Tx rules to reflect the selected
2008                  * VEB/VEPA mode
2009                  */
2010                 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2011                     (fi->fltr_act == ICE_FWD_TO_VSI ||
2012                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2013                      fi->fltr_act == ICE_FWD_TO_Q ||
2014                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
2015                         status = ice_update_pkt_fwd_rule(hw, fi);
2016                         if (status)
2017                                 break;
2018                 }
2019         }
2020
2021         mutex_unlock(rule_lock);
2022
2023         return status;
2024 }
2025
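/* Illustrative usage sketch, not part of the driver: after the uplink bridge
 * mode has been switched between VEB and VEPA (i.e. hw->evb_veb was updated
 * elsewhere), the existing unicast Tx rules can be refreshed in one call.
 * The wrapper name is hypothetical.
 */
static int ice_example_refresh_bridge_rules(struct ice_hw *hw)
{
	int err;

	err = ice_update_sw_rule_bridge_mode(hw);
	if (err)
		dev_warn(ice_hw_to_dev(hw),
			 "Failed to update switch rules for bridge mode, err %d\n",
			 err);

	return err;
}
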
2026 /**
2027  * ice_add_update_vsi_list - add a VSI to a filter's VSI list
2028  * @hw: pointer to the hardware structure
2029  * @m_entry: pointer to current filter management list entry
2030  * @cur_fltr: filter information from the book keeping entry
2031  * @new_fltr: filter information with the new VSI to be added
2032  *
2033  * Call AQ command to add or update previously created VSI list with new VSI.
2034  *
2035  * Helper function to do the bookkeeping associated with adding filter
2036  * information. The algorithm for the bookkeeping is described below:
2037  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2038  *      if only one VSI has been added so far
2039  *              Allocate a new VSI list and add two VSIs
2040  *              to this list using switch rule command
2041  *              Update the previously created switch rule with the
2042  *              newly created VSI list ID
2043  *      if a VSI list was previously created
2044  *              Add the new VSI to the previously created VSI list set
2045  *              using the update switch rule command
2046  */
2047 static int
2048 ice_add_update_vsi_list(struct ice_hw *hw,
2049                         struct ice_fltr_mgmt_list_entry *m_entry,
2050                         struct ice_fltr_info *cur_fltr,
2051                         struct ice_fltr_info *new_fltr)
2052 {
2053         u16 vsi_list_id = 0;
2054         int status = 0;
2055
2056         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2057              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2058                 return -EOPNOTSUPP;
2059
2060         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2061              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2062             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2063              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2064                 return -EOPNOTSUPP;
2065
2066         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2067                 /* Only one entry existed in the mapping and it was not already
2068                  * a part of a VSI list. So, create a VSI list with the old and
2069                  * new VSIs.
2070                  */
2071                 struct ice_fltr_info tmp_fltr;
2072                 u16 vsi_handle_arr[2];
2073
2074                 /* A rule already exists with the new VSI being added */
2075                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2076                         return -EEXIST;
2077
2078                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2079                 vsi_handle_arr[1] = new_fltr->vsi_handle;
2080                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2081                                                   &vsi_list_id,
2082                                                   new_fltr->lkup_type);
2083                 if (status)
2084                         return status;
2085
2086                 tmp_fltr = *new_fltr;
2087                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2088                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2089                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2090                 /* Update the previous switch rule of "MAC forward to VSI" to
2091                  * "MAC fwd to VSI list"
2092                  */
2093                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2094                 if (status)
2095                         return status;
2096
2097                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2098                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2099                 m_entry->vsi_list_info =
2100                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2101                                                 vsi_list_id);
2102
2103                 if (!m_entry->vsi_list_info)
2104                         return -ENOMEM;
2105
2106                 /* If this entry was large action then the large action needs
2107                  * to be updated to point to FWD to VSI list
2108                  */
2109                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2110                         status =
2111                             ice_add_marker_act(hw, m_entry,
2112                                                m_entry->sw_marker_id,
2113                                                m_entry->lg_act_idx);
2114         } else {
2115                 u16 vsi_handle = new_fltr->vsi_handle;
2116                 enum ice_adminq_opc opcode;
2117
2118                 if (!m_entry->vsi_list_info)
2119                         return -EIO;
2120
2121                 /* A rule already exists with the new VSI being added */
2122                 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
2123                         return 0;
2124
2125                 /* Update the previously created VSI list set with
2126                  * the new VSI ID passed in
2127                  */
2128                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2129                 opcode = ice_aqc_opc_update_sw_rules;
2130
2131                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2132                                                   vsi_list_id, false, opcode,
2133                                                   new_fltr->lkup_type);
2134                 /* update VSI list mapping info with new VSI ID */
2135                 if (!status)
2136                         set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
2137         }
2138         if (!status)
2139                 m_entry->vsi_count++;
2140         return status;
2141 }
2142
2143 /**
2144  * ice_find_rule_entry - Search a rule entry
2145  * @hw: pointer to the hardware structure
2146  * @recp_id: lookup type for which the specified rule needs to be searched
2147  * @f_info: rule information
2148  *
2149  * Helper function to search for a given rule entry
2150  * Returns pointer to entry storing the rule if found
2151  */
2152 static struct ice_fltr_mgmt_list_entry *
2153 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2154 {
2155         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2156         struct ice_switch_info *sw = hw->switch_info;
2157         struct list_head *list_head;
2158
2159         list_head = &sw->recp_list[recp_id].filt_rules;
2160         list_for_each_entry(list_itr, list_head, list_entry) {
2161                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2162                             sizeof(f_info->l_data)) &&
2163                     f_info->flag == list_itr->fltr_info.flag) {
2164                         ret = list_itr;
2165                         break;
2166                 }
2167         }
2168         return ret;
2169 }
2170
2171 /**
2172  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2173  * @hw: pointer to the hardware structure
2174  * @recp_id: lookup type for which VSI lists need to be searched
2175  * @vsi_handle: VSI handle to be found in VSI list
2176  * @vsi_list_id: VSI list ID found containing vsi_handle
2177  *
2178  * Helper function to search for a VSI list with a single entry containing the
2179  * given VSI handle. This can be extended further to search VSI lists with more
2180  * than one VSI. Returns a pointer to the VSI list entry if found.
2181  */
2182 static struct ice_vsi_list_map_info *
2183 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2184                         u16 *vsi_list_id)
2185 {
2186         struct ice_vsi_list_map_info *map_info = NULL;
2187         struct ice_switch_info *sw = hw->switch_info;
2188         struct ice_fltr_mgmt_list_entry *list_itr;
2189         struct list_head *list_head;
2190
2191         list_head = &sw->recp_list[recp_id].filt_rules;
2192         list_for_each_entry(list_itr, list_head, list_entry) {
2193                 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
2194                         map_info = list_itr->vsi_list_info;
2195                         if (test_bit(vsi_handle, map_info->vsi_map)) {
2196                                 *vsi_list_id = map_info->vsi_list_id;
2197                                 return map_info;
2198                         }
2199                 }
2200         }
2201         return NULL;
2202 }
2203
2204 /**
2205  * ice_add_rule_internal - add rule for a given lookup type
2206  * @hw: pointer to the hardware structure
2207  * @recp_id: lookup type (recipe ID) for which rule has to be added
2208  * @f_entry: structure containing MAC forwarding information
2209  *
2210  * Adds or updates the rule lists for a given recipe
2211  */
2212 static int
2213 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2214                       struct ice_fltr_list_entry *f_entry)
2215 {
2216         struct ice_switch_info *sw = hw->switch_info;
2217         struct ice_fltr_info *new_fltr, *cur_fltr;
2218         struct ice_fltr_mgmt_list_entry *m_entry;
2219         struct mutex *rule_lock; /* Lock to protect filter rule list */
2220         int status = 0;
2221
2222         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2223                 return -EINVAL;
2224         f_entry->fltr_info.fwd_id.hw_vsi_id =
2225                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2226
2227         rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2228
2229         mutex_lock(rule_lock);
2230         new_fltr = &f_entry->fltr_info;
2231         if (new_fltr->flag & ICE_FLTR_RX)
2232                 new_fltr->src = hw->port_info->lport;
2233         else if (new_fltr->flag & ICE_FLTR_TX)
2234                 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
2235
2236         m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2237         if (!m_entry) {
2238                 mutex_unlock(rule_lock);
2239                 return ice_create_pkt_fwd_rule(hw, f_entry);
2240         }
2241
2242         cur_fltr = &m_entry->fltr_info;
2243         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2244         mutex_unlock(rule_lock);
2245
2246         return status;
2247 }
2248
2249 /**
2250  * ice_remove_vsi_list_rule - remove a VSI list switch rule
2251  * @hw: pointer to the hardware structure
2252  * @vsi_list_id: VSI list ID generated as part of allocate resource
2253  * @lkup_type: switch rule filter lookup type
2254  *
2255  * The VSI list should be emptied before this function is called to remove the
2256  * VSI list.
2257  */
2258 static int
2259 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2260                          enum ice_sw_lkup_type lkup_type)
2261 {
2262         struct ice_aqc_sw_rules_elem *s_rule;
2263         u16 s_rule_size;
2264         int status;
2265
2266         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2267         s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2268         if (!s_rule)
2269                 return -ENOMEM;
2270
2271         s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2272         s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
2273
2274         /* Free the vsi_list resource that we allocated. It is assumed that the
2275          * list is empty at this point.
2276          */
2277         status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2278                                             ice_aqc_opc_free_res);
2279
2280         devm_kfree(ice_hw_to_dev(hw), s_rule);
2281         return status;
2282 }
2283
2284 /**
2285  * ice_rem_update_vsi_list - remove a VSI from a filter's VSI list
2286  * @hw: pointer to the hardware structure
2287  * @vsi_handle: VSI handle of the VSI to remove
2288  * @fm_list: filter management entry for which the VSI list management needs to
2289  *           be done
2290  */
2291 static int
2292 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2293                         struct ice_fltr_mgmt_list_entry *fm_list)
2294 {
2295         enum ice_sw_lkup_type lkup_type;
2296         u16 vsi_list_id;
2297         int status = 0;
2298
2299         if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2300             fm_list->vsi_count == 0)
2301                 return -EINVAL;
2302
2303         /* The VSI being removed is not part of this rule's VSI list */
2304         if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
2305                 return -ENOENT;
2306
2307         lkup_type = fm_list->fltr_info.lkup_type;
2308         vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2309         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2310                                           ice_aqc_opc_update_sw_rules,
2311                                           lkup_type);
2312         if (status)
2313                 return status;
2314
2315         fm_list->vsi_count--;
2316         clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2317
2318         if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2319                 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2320                 struct ice_vsi_list_map_info *vsi_list_info =
2321                         fm_list->vsi_list_info;
2322                 u16 rem_vsi_handle;
2323
2324                 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
2325                                                 ICE_MAX_VSI);
2326                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2327                         return -EIO;
2328
2329                 /* Make sure VSI list is empty before removing it below */
2330                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2331                                                   vsi_list_id, true,
2332                                                   ice_aqc_opc_update_sw_rules,
2333                                                   lkup_type);
2334                 if (status)
2335                         return status;
2336
2337                 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2338                 tmp_fltr_info.fwd_id.hw_vsi_id =
2339                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
2340                 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2341                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2342                 if (status) {
2343                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2344                                   tmp_fltr_info.fwd_id.hw_vsi_id, status);
2345                         return status;
2346                 }
2347
2348                 fm_list->fltr_info = tmp_fltr_info;
2349         }
2350
2351         if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2352             (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2353                 struct ice_vsi_list_map_info *vsi_list_info =
2354                         fm_list->vsi_list_info;
2355
2356                 /* Remove the VSI list since it is no longer used */
2357                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2358                 if (status) {
2359                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
2360                                   vsi_list_id, status);
2361                         return status;
2362                 }
2363
2364                 list_del(&vsi_list_info->list_entry);
2365                 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
2366                 fm_list->vsi_list_info = NULL;
2367         }
2368
2369         return status;
2370 }
2371
2372 /**
2373  * ice_remove_rule_internal - Remove a filter rule of a given type
2374  * @hw: pointer to the hardware structure
2375  * @recp_id: recipe ID for which the rule needs to be removed
2376  * @f_entry: rule entry containing filter information
2377  */
2378 static int
2379 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2380                          struct ice_fltr_list_entry *f_entry)
2381 {
2382         struct ice_switch_info *sw = hw->switch_info;
2383         struct ice_fltr_mgmt_list_entry *list_elem;
2384         struct mutex *rule_lock; /* Lock to protect filter rule list */
2385         bool remove_rule = false;
2386         u16 vsi_handle;
2387         int status = 0;
2388
2389         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2390                 return -EINVAL;
2391         f_entry->fltr_info.fwd_id.hw_vsi_id =
2392                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2393
2394         rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2395         mutex_lock(rule_lock);
2396         list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2397         if (!list_elem) {
2398                 status = -ENOENT;
2399                 goto exit;
2400         }
2401
2402         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2403                 remove_rule = true;
2404         } else if (!list_elem->vsi_list_info) {
2405                 status = -ENOENT;
2406                 goto exit;
2407         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2408                 /* a ref_cnt > 1 indicates that the vsi_list is being
2409                  * shared by multiple rules. Decrement the ref_cnt and
2410                  * remove this rule, but do not modify the list, as it
2411                  * is in-use by other rules.
2412                  */
2413                 list_elem->vsi_list_info->ref_cnt--;
2414                 remove_rule = true;
2415         } else {
2416                 /* a ref_cnt of 1 indicates the vsi_list is only used
2417                  * by one rule. However, the original removal request is only
2418                  * for a single VSI. Update the vsi_list first, and only
2419                  * remove the rule if there are no further VSIs in this list.
2420                  */
2421                 vsi_handle = f_entry->fltr_info.vsi_handle;
2422                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2423                 if (status)
2424                         goto exit;
2425                 /* remove the rule if the VSI count drops to zero */
2426                 if (list_elem->vsi_count == 0)
2427                         remove_rule = true;
2428         }
2429
2430         if (remove_rule) {
2431                 /* Remove the lookup rule */
2432                 struct ice_aqc_sw_rules_elem *s_rule;
2433
2434                 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2435                                       ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
2436                                       GFP_KERNEL);
2437                 if (!s_rule) {
2438                         status = -ENOMEM;
2439                         goto exit;
2440                 }
2441
2442                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2443                                  ice_aqc_opc_remove_sw_rules);
2444
2445                 status = ice_aq_sw_rules(hw, s_rule,
2446                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2447                                          ice_aqc_opc_remove_sw_rules, NULL);
2448
2449                 /* Remove the bookkeeping entry from the list */
2450                 devm_kfree(ice_hw_to_dev(hw), s_rule);
2451
2452                 if (status)
2453                         goto exit;
2454
2455                 list_del(&list_elem->list_entry);
2456                 devm_kfree(ice_hw_to_dev(hw), list_elem);
2457         }
2458 exit:
2459         mutex_unlock(rule_lock);
2460         return status;
2461 }
2462
2463 /**
2464  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
2465  * @hw: pointer to the hardware structure
2466  * @mac: MAC address to be checked (for MAC filter)
2467  * @vsi_handle: check MAC filter for this VSI
2468  */
2469 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
2470 {
2471         struct ice_fltr_mgmt_list_entry *entry;
2472         struct list_head *rule_head;
2473         struct ice_switch_info *sw;
2474         struct mutex *rule_lock; /* Lock to protect filter rule list */
2475         u16 hw_vsi_id;
2476
2477         if (!ice_is_vsi_valid(hw, vsi_handle))
2478                 return false;
2479
2480         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2481         sw = hw->switch_info;
2482         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2483         if (!rule_head)
2484                 return false;
2485
2486         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2487         mutex_lock(rule_lock);
2488         list_for_each_entry(entry, rule_head, list_entry) {
2489                 struct ice_fltr_info *f_info = &entry->fltr_info;
2490                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2491
2492                 if (is_zero_ether_addr(mac_addr))
2493                         continue;
2494
2495                 if (f_info->flag != ICE_FLTR_TX ||
2496                     f_info->src_id != ICE_SRC_ID_VSI ||
2497                     f_info->lkup_type != ICE_SW_LKUP_MAC ||
2498                     f_info->fltr_act != ICE_FWD_TO_VSI ||
2499                     hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2500                         continue;
2501
2502                 if (ether_addr_equal(mac, mac_addr)) {
2503                         mutex_unlock(rule_lock);
2504                         return true;
2505                 }
2506         }
2507         mutex_unlock(rule_lock);
2508         return false;
2509 }
2510
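/* Illustrative usage sketch, not part of the driver: decide whether a MAC
 * filter still needs to be programmed for a VSI. The wrapper name is
 * hypothetical; mac points at an ETH_ALEN byte address.
 */
static bool ice_example_mac_needs_add(struct ice_hw *hw, u16 vsi_handle,
				      u8 *mac)
{
	/* only add the filter if it is not already present on this VSI */
	return !ice_mac_fltr_exist(hw, mac, vsi_handle);
}
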
2511 /**
2512  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
2513  * @hw: pointer to the hardware structure
2514  * @vlan_id: VLAN ID
2515  * @vsi_handle: check VLAN filter for this VSI
2516  */
2517 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
2518 {
2519         struct ice_fltr_mgmt_list_entry *entry;
2520         struct list_head *rule_head;
2521         struct ice_switch_info *sw;
2522         struct mutex *rule_lock; /* Lock to protect filter rule list */
2523         u16 hw_vsi_id;
2524
2525         if (vlan_id > ICE_MAX_VLAN_ID)
2526                 return false;
2527
2528         if (!ice_is_vsi_valid(hw, vsi_handle))
2529                 return false;
2530
2531         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2532         sw = hw->switch_info;
2533         rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
2534         if (!rule_head)
2535                 return false;
2536
2537         rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2538         mutex_lock(rule_lock);
2539         list_for_each_entry(entry, rule_head, list_entry) {
2540                 struct ice_fltr_info *f_info = &entry->fltr_info;
2541                 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
2542                 struct ice_vsi_list_map_info *map_info;
2543
2544                 if (entry_vlan_id > ICE_MAX_VLAN_ID)
2545                         continue;
2546
2547                 if (f_info->flag != ICE_FLTR_TX ||
2548                     f_info->src_id != ICE_SRC_ID_VSI ||
2549                     f_info->lkup_type != ICE_SW_LKUP_VLAN)
2550                         continue;
2551
2552                 /* The only allowed filter actions are FWD_TO_VSI/_VSI_LIST */
2553                 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
2554                     f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
2555                         continue;
2556
2557                 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
2558                         if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2559                                 continue;
2560                 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2561                         /* If filter_action is FWD_TO_VSI_LIST, make sure
2562                          * that VSI being checked is part of VSI list
2563                          */
2564                         if (entry->vsi_count == 1 &&
2565                             entry->vsi_list_info) {
2566                                 map_info = entry->vsi_list_info;
2567                                 if (!test_bit(vsi_handle, map_info->vsi_map))
2568                                         continue;
2569                         }
2570                 }
2571
2572                 if (vlan_id == entry_vlan_id) {
2573                         mutex_unlock(rule_lock);
2574                         return true;
2575                 }
2576         }
2577         mutex_unlock(rule_lock);
2578
2579         return false;
2580 }
2581
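/* Illustrative usage sketch, not part of the driver: before adding a VLAN
 * pruning rule for a VSI, a caller can check whether the VLAN ID is already
 * covered. The wrapper name and the debug message are hypothetical.
 */
static bool ice_example_vlan_needs_add(struct ice_hw *hw, u16 vsi_handle,
				       u16 vlan_id)
{
	if (ice_vlan_fltr_exist(hw, vlan_id, vsi_handle)) {
		ice_debug(hw, ICE_DBG_SW, "VLAN %u already exists on VSI %u\n",
			  vlan_id, vsi_handle);
		return false;
	}

	return true;
}
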
2582 /**
2583  * ice_add_mac - Add a MAC address based filter rule
2584  * @hw: pointer to the hardware structure
2585  * @m_list: list of MAC addresses and forwarding information
2586  *
2587  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2588  * multiple unicast addresses, the function assumes that all the
2589  * addresses are unique in a given add_mac call. It doesn't
2590  * check for duplicates in this case; removing duplicates from a given
2591  * list is the responsibility of the caller of this function.
2592  */
2593 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
2594 {
2595         struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2596         struct ice_fltr_list_entry *m_list_itr;
2597         struct list_head *rule_head;
2598         u16 total_elem_left, s_rule_size;
2599         struct ice_switch_info *sw;
2600         struct mutex *rule_lock; /* Lock to protect filter rule list */
2601         u16 num_unicast = 0;
2602         int status = 0;
2603         u8 elem_sent;
2604
2605         if (!m_list || !hw)
2606                 return -EINVAL;
2607
2608         s_rule = NULL;
2609         sw = hw->switch_info;
2610         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2611         list_for_each_entry(m_list_itr, m_list, list_entry) {
2612                 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2613                 u16 vsi_handle;
2614                 u16 hw_vsi_id;
2615
2616                 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2617                 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2618                 if (!ice_is_vsi_valid(hw, vsi_handle))
2619                         return -EINVAL;
2620                 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2621                 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2622                 /* update the src in case it is VSI num */
2623                 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2624                         return -EINVAL;
2625                 m_list_itr->fltr_info.src = hw_vsi_id;
2626                 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2627                     is_zero_ether_addr(add))
2628                         return -EINVAL;
2629                 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2630                         /* Don't overwrite the unicast address */
2631                         mutex_lock(rule_lock);
2632                         if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2633                                                 &m_list_itr->fltr_info)) {
2634                                 mutex_unlock(rule_lock);
2635                                 return -EEXIST;
2636                         }
2637                         mutex_unlock(rule_lock);
2638                         num_unicast++;
2639                 } else if (is_multicast_ether_addr(add) ||
2640                            (is_unicast_ether_addr(add) && hw->ucast_shared)) {
2641                         m_list_itr->status =
2642                                 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2643                                                       m_list_itr);
2644                         if (m_list_itr->status)
2645                                 return m_list_itr->status;
2646                 }
2647         }
2648
2649         mutex_lock(rule_lock);
2650         /* Exit if no suitable entries were found for adding a bulk switch rule */
2651         if (!num_unicast) {
2652                 status = 0;
2653                 goto ice_add_mac_exit;
2654         }
2655
2656         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2657
2658         /* Allocate switch rule buffer for the bulk update for unicast */
2659         s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2660         s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
2661                               GFP_KERNEL);
2662         if (!s_rule) {
2663                 status = -ENOMEM;
2664                 goto ice_add_mac_exit;
2665         }
2666
2667         r_iter = s_rule;
2668         list_for_each_entry(m_list_itr, m_list, list_entry) {
2669                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2670                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2671
2672                 if (is_unicast_ether_addr(mac_addr)) {
2673                         ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2674                                          ice_aqc_opc_add_sw_rules);
2675                         r_iter = (struct ice_aqc_sw_rules_elem *)
2676                                 ((u8 *)r_iter + s_rule_size);
2677                 }
2678         }
2679
2680         /* Call AQ bulk switch rule update for all unicast addresses */
2681         r_iter = s_rule;
2682         /* Issue the AQ switch rule command in ICE_AQ_MAX_BUF_LEN chunks */
2683         for (total_elem_left = num_unicast; total_elem_left > 0;
2684              total_elem_left -= elem_sent) {
2685                 struct ice_aqc_sw_rules_elem *entry = r_iter;
2686
2687                 elem_sent = min_t(u8, total_elem_left,
2688                                   (ICE_AQ_MAX_BUF_LEN / s_rule_size));
2689                 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2690                                          elem_sent, ice_aqc_opc_add_sw_rules,
2691                                          NULL);
2692                 if (status)
2693                         goto ice_add_mac_exit;
2694                 r_iter = (struct ice_aqc_sw_rules_elem *)
2695                         ((u8 *)r_iter + (elem_sent * s_rule_size));
2696         }
2697
2698         /* Fill up rule ID based on the value returned from FW */
2699         r_iter = s_rule;
2700         list_for_each_entry(m_list_itr, m_list, list_entry) {
2701                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2702                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2703                 struct ice_fltr_mgmt_list_entry *fm_entry;
2704
2705                 if (is_unicast_ether_addr(mac_addr)) {
2706                         f_info->fltr_rule_id =
2707                                 le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
2708                         f_info->fltr_act = ICE_FWD_TO_VSI;
2709                         /* Create an entry to track this MAC address */
2710                         fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
2711                                                 sizeof(*fm_entry), GFP_KERNEL);
2712                         if (!fm_entry) {
2713                                 status = -ENOMEM;
2714                                 goto ice_add_mac_exit;
2715                         }
2716                         fm_entry->fltr_info = *f_info;
2717                         fm_entry->vsi_count = 1;
2718                         /* The bookkeeping entries will get removed when the
2719                          * base driver calls the remove filter AQ command.
2720                          */
2721
2722                         list_add(&fm_entry->list_entry, rule_head);
2723                         r_iter = (struct ice_aqc_sw_rules_elem *)
2724                                 ((u8 *)r_iter + s_rule_size);
2725                 }
2726         }
2727
2728 ice_add_mac_exit:
2729         mutex_unlock(rule_lock);
2730         if (s_rule)
2731                 devm_kfree(ice_hw_to_dev(hw), s_rule);
2732         return status;
2733 }
2734
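/* Illustrative usage sketch, not part of the driver: adding a single unicast
 * MAC filter that forwards to a VSI. ice_add_mac() consumes a list of
 * ice_fltr_list_entry elements, so even a single address is wrapped in a
 * list. The wrapper name is hypothetical and error unwinding is elided.
 */
static int ice_example_add_one_mac(struct ice_hw *hw, u16 vsi_handle,
				   const u8 *mac)
{
	struct ice_fltr_list_entry entry = {};
	LIST_HEAD(tmp_list);

	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
	entry.fltr_info.vsi_handle = vsi_handle;
	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
	list_add(&entry.list_entry, &tmp_list);

	return ice_add_mac(hw, &tmp_list);
}
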
2735 /**
2736  * ice_add_vlan_internal - Add one VLAN based filter rule
2737  * @hw: pointer to the hardware structure
2738  * @f_entry: filter entry containing one VLAN information
2739  */
2740 static int
2741 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2742 {
2743         struct ice_switch_info *sw = hw->switch_info;
2744         struct ice_fltr_mgmt_list_entry *v_list_itr;
2745         struct ice_fltr_info *new_fltr, *cur_fltr;
2746         enum ice_sw_lkup_type lkup_type;
2747         u16 vsi_list_id = 0, vsi_handle;
2748         struct mutex *rule_lock; /* Lock to protect filter rule list */
2749         int status = 0;
2750
2751         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2752                 return -EINVAL;
2753
2754         f_entry->fltr_info.fwd_id.hw_vsi_id =
2755                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2756         new_fltr = &f_entry->fltr_info;
2757
2758         /* VLAN ID should only be 12 bits */
2759         if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2760                 return -EINVAL;
2761
2762         if (new_fltr->src_id != ICE_SRC_ID_VSI)
2763                 return -EINVAL;
2764
2765         new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2766         lkup_type = new_fltr->lkup_type;
2767         vsi_handle = new_fltr->vsi_handle;
2768         rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2769         mutex_lock(rule_lock);
2770         v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
2771         if (!v_list_itr) {
2772                 struct ice_vsi_list_map_info *map_info = NULL;
2773
2774                 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2775                         /* All VLAN pruning rules use a VSI list. Check if
2776                          * there is already a VSI list containing the VSI that we
2777                          * want to add. If found, use the same vsi_list_id for
2778                          * this new VLAN rule or else create a new list.
2779                          */
2780                         map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2781                                                            vsi_handle,
2782                                                            &vsi_list_id);
2783                         if (!map_info) {
2784                                 status = ice_create_vsi_list_rule(hw,
2785                                                                   &vsi_handle,
2786                                                                   1,
2787                                                                   &vsi_list_id,
2788                                                                   lkup_type);
2789                                 if (status)
2790                                         goto exit;
2791                         }
2792                         /* Convert the action to forwarding to a VSI list. */
2793                         new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2794                         new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2795                 }
2796
2797                 status = ice_create_pkt_fwd_rule(hw, f_entry);
2798                 if (!status) {
2799                         v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2800                                                          new_fltr);
2801                         if (!v_list_itr) {
2802                                 status = -ENOENT;
2803                                 goto exit;
2804                         }
2805                         /* reuse VSI list for new rule and increment ref_cnt */
2806                         if (map_info) {
2807                                 v_list_itr->vsi_list_info = map_info;
2808                                 map_info->ref_cnt++;
2809                         } else {
2810                                 v_list_itr->vsi_list_info =
2811                                         ice_create_vsi_list_map(hw, &vsi_handle,
2812                                                                 1, vsi_list_id);
2813                         }
2814                 }
2815         } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2816                 /* Update the existing VSI list to add the new VSI ID only if
2817                  * it is used by exactly one VLAN rule.
2818                  */
2819                 cur_fltr = &v_list_itr->fltr_info;
2820                 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2821                                                  new_fltr);
2822         } else {
2823                 /* If the VLAN rule exists and the VSI list used by this rule
2824                  * is referenced by more than one VLAN rule, then create a new
2825                  * VSI list that appends the new VSI to the previous one, and
2826                  * update the existing VLAN rule to point to the new VSI list ID.
2827                  */
2828                 struct ice_fltr_info tmp_fltr;
2829                 u16 vsi_handle_arr[2];
2830                 u16 cur_handle;
2831
2832                 /* The current implementation only supports reusing a VSI list
2833                  * with a single VSI; the condition below should never be hit.
2834                  */
2835                 if (v_list_itr->vsi_count > 1 &&
2836                     v_list_itr->vsi_list_info->ref_cnt > 1) {
2837                         ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
2838                         status = -EIO;
2839                         goto exit;
2840                 }
2841
2842                 cur_handle =
2843                         find_first_bit(v_list_itr->vsi_list_info->vsi_map,
2844                                        ICE_MAX_VSI);
2845
2846                 /* A rule already exists with the new VSI being added */
2847                 if (cur_handle == vsi_handle) {
2848                         status = -EEXIST;
2849                         goto exit;
2850                 }
2851
2852                 vsi_handle_arr[0] = cur_handle;
2853                 vsi_handle_arr[1] = vsi_handle;
2854                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2855                                                   &vsi_list_id, lkup_type);
2856                 if (status)
2857                         goto exit;
2858
2859                 tmp_fltr = v_list_itr->fltr_info;
2860                 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
2861                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2862                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2863                 /* Update the previous switch rule to use the new VSI list,
2864                  * which includes the currently requested VSI.
2865                  */
2866                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2867                 if (status)
2868                         goto exit;
2869
2870                 /* Before overriding the VSI list map info, decrement the
2871                  * ref_cnt of the previous VSI list.
2872                  */
2873                 v_list_itr->vsi_list_info->ref_cnt--;
2874
2875                 /* now update to newly created list */
2876                 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
2877                 v_list_itr->vsi_list_info =
2878                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2879                                                 vsi_list_id);
2880                 v_list_itr->vsi_count++;
2881         }
2882
2883 exit:
2884         mutex_unlock(rule_lock);
2885         return status;
2886 }
2887
2888 /**
2889  * ice_add_vlan - Add VLAN based filter rule
2890  * @hw: pointer to the hardware structure
2891  * @v_list: list of VLAN entries and forwarding information
2892  */
2893 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
2894 {
2895         struct ice_fltr_list_entry *v_list_itr;
2896
2897         if (!v_list || !hw)
2898                 return -EINVAL;
2899
2900         list_for_each_entry(v_list_itr, v_list, list_entry) {
2901                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2902                         return -EINVAL;
2903                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2904                 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2905                 if (v_list_itr->status)
2906                         return v_list_itr->status;
2907         }
2908         return 0;
2909 }
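
/* Usage sketch (illustrative only, not part of the driver): a caller builds a
 * list of ice_fltr_list_entry items, one per VLAN, and hands it to
 * ice_add_vlan(). The hw pointer and vsi_handle below are assumed to be a
 * valid hardware structure and VSI handle owned by the caller.
 *
 *      struct ice_fltr_list_entry *entry;
 *      LIST_HEAD(vlan_list);
 *      int err;
 *
 *      entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *      if (!entry)
 *              return -ENOMEM;
 *      entry->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *      entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *      entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *      entry->fltr_info.vsi_handle = vsi_handle;
 *      entry->fltr_info.l_data.vlan.vlan_id = 100;
 *      list_add(&entry->list_entry, &vlan_list);
 *
 *      err = ice_add_vlan(hw, &vlan_list);
 */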
2910
2911 /**
2912  * ice_add_eth_mac - Add ethertype and MAC based filter rule
2913  * @hw: pointer to the hardware structure
2914  * @em_list: list of ethertype or ethertype-MAC entries; the MAC address is optional
2915  *
2916  * This function requires the caller to populate the entries in
2917  * the filter list with the necessary fields (including flags to
2918  * indicate Tx or Rx rules).
2919  */
2920 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2921 {
2922         struct ice_fltr_list_entry *em_list_itr;
2923
2924         if (!em_list || !hw)
2925                 return -EINVAL;
2926
2927         list_for_each_entry(em_list_itr, em_list, list_entry) {
2928                 enum ice_sw_lkup_type l_type =
2929                         em_list_itr->fltr_info.lkup_type;
2930
2931                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2932                     l_type != ICE_SW_LKUP_ETHERTYPE)
2933                         return -EINVAL;
2934
2935                 em_list_itr->status = ice_add_rule_internal(hw, l_type,
2936                                                             em_list_itr);
2937                 if (em_list_itr->status)
2938                         return em_list_itr->status;
2939         }
2940         return 0;
2941 }
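
/* Usage sketch (illustrative only, not part of the driver): unlike the MAC and
 * VLAN helpers, ice_add_eth_mac() expects the caller to fill in the direction
 * flag itself. ETH_P_LLDP is used here purely as an example ethertype, and
 * hw/vsi_handle are assumed to come from the caller.
 *
 *      struct ice_fltr_list_entry *entry;
 *      LIST_HEAD(em_list);
 *      int err;
 *
 *      entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *      if (!entry)
 *              return -ENOMEM;
 *      entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
 *      entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *      entry->fltr_info.flag = ICE_FLTR_TX;
 *      entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *      entry->fltr_info.vsi_handle = vsi_handle;
 *      entry->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
 *      list_add(&entry->list_entry, &em_list);
 *
 *      err = ice_add_eth_mac(hw, &em_list);
 */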
2942
2943 /**
2944  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
2945  * @hw: pointer to the hardware structure
2946  * @em_list: list of ethertype or ethertype MAC entries
2947  */
2948 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2949 {
2950         struct ice_fltr_list_entry *em_list_itr, *tmp;
2951
2952         if (!em_list || !hw)
2953                 return -EINVAL;
2954
2955         list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2956                 enum ice_sw_lkup_type l_type =
2957                         em_list_itr->fltr_info.lkup_type;
2958
2959                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2960                     l_type != ICE_SW_LKUP_ETHERTYPE)
2961                         return -EINVAL;
2962
2963                 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2964                                                                em_list_itr);
2965                 if (em_list_itr->status)
2966                         return em_list_itr->status;
2967         }
2968         return 0;
2969 }
2970
2971 /**
2972  * ice_rem_sw_rule_info - free a list of switch rule bookkeeping entries
2973  * @hw: pointer to the hardware structure
2974  * @rule_head: pointer to the switch list structure that we want to delete
2975  */
2976 static void
2977 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2978 {
2979         if (!list_empty(rule_head)) {
2980                 struct ice_fltr_mgmt_list_entry *entry;
2981                 struct ice_fltr_mgmt_list_entry *tmp;
2982
2983                 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2984                         list_del(&entry->list_entry);
2985                         devm_kfree(ice_hw_to_dev(hw), entry);
2986                 }
2987         }
2988 }
2989
2990 /**
2991  * ice_rem_adv_rule_info - free a list of advanced rule bookkeeping entries
2992  * @hw: pointer to the hardware structure
2993  * @rule_head: pointer to the switch list structure that we want to delete
2994  */
2995 static void
2996 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2997 {
2998         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
2999         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3000
3001         if (list_empty(rule_head))
3002                 return;
3003
3004         list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3005                 list_del(&lst_itr->list_entry);
3006                 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3007                 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3008         }
3009 }
3010
3011 /**
3012  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3013  * @hw: pointer to the hardware structure
3014  * @vsi_handle: VSI handle to set as default
3015  * @set: true to add the above mentioned switch rule, false to remove it
3016  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3017  *
3018  * Add a filter rule to set/unset the given VSI as the default VSI for the
3019  * switch (represented by the SWID).
3020  */
3021 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3022 {
3023         struct ice_aqc_sw_rules_elem *s_rule;
3024         struct ice_fltr_info f_info;
3025         enum ice_adminq_opc opcode;
3026         u16 s_rule_size;
3027         u16 hw_vsi_id;
3028         int status;
3029
3030         if (!ice_is_vsi_valid(hw, vsi_handle))
3031                 return -EINVAL;
3032         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3033
3034         s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3035                 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3036
3037         s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3038         if (!s_rule)
3039                 return -ENOMEM;
3040
3041         memset(&f_info, 0, sizeof(f_info));
3042
3043         f_info.lkup_type = ICE_SW_LKUP_DFLT;
3044         f_info.flag = direction;
3045         f_info.fltr_act = ICE_FWD_TO_VSI;
3046         f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3047
3048         if (f_info.flag & ICE_FLTR_RX) {
3049                 f_info.src = hw->port_info->lport;
3050                 f_info.src_id = ICE_SRC_ID_LPORT;
3051                 if (!set)
3052                         f_info.fltr_rule_id =
3053                                 hw->port_info->dflt_rx_vsi_rule_id;
3054         } else if (f_info.flag & ICE_FLTR_TX) {
3055                 f_info.src_id = ICE_SRC_ID_VSI;
3056                 f_info.src = hw_vsi_id;
3057                 if (!set)
3058                         f_info.fltr_rule_id =
3059                                 hw->port_info->dflt_tx_vsi_rule_id;
3060         }
3061
3062         if (set)
3063                 opcode = ice_aqc_opc_add_sw_rules;
3064         else
3065                 opcode = ice_aqc_opc_remove_sw_rules;
3066
3067         ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3068
3069         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3070         if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3071                 goto out;
3072         if (set) {
3073                 u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
3074
3075                 if (f_info.flag & ICE_FLTR_TX) {
3076                         hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3077                         hw->port_info->dflt_tx_vsi_rule_id = index;
3078                 } else if (f_info.flag & ICE_FLTR_RX) {
3079                         hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3080                         hw->port_info->dflt_rx_vsi_rule_id = index;
3081                 }
3082         } else {
3083                 if (f_info.flag & ICE_FLTR_TX) {
3084                         hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3085                         hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3086                 } else if (f_info.flag & ICE_FLTR_RX) {
3087                         hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3088                         hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3089                 }
3090         }
3091
3092 out:
3093         devm_kfree(ice_hw_to_dev(hw), s_rule);
3094         return status;
3095 }
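
/* Usage sketch (illustrative only, not part of the driver): make the VSI
 * behind vsi_handle the default destination for Rx traffic that matches no
 * other switch rule, and later undo it. hw and vsi_handle are assumed to come
 * from the caller.
 *
 *      err = ice_cfg_dflt_vsi(hw, vsi_handle, true, ICE_FLTR_RX);
 *      ...
 *      err = ice_cfg_dflt_vsi(hw, vsi_handle, false, ICE_FLTR_RX);
 */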
3096
3097 /**
3098  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3099  * @hw: pointer to the hardware structure
3100  * @recp_id: lookup type for which the specified rule needs to be searched
3101  * @f_info: rule information
3102  *
3103  * Helper function to search for a unicast rule entry - this is to be used
3104  * to remove a unicast MAC filter that is not shared with other VSIs on the
3105  * PF switch.
3106  *
3107  * Returns pointer to entry storing the rule if found
3108  */
3109 static struct ice_fltr_mgmt_list_entry *
3110 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3111                           struct ice_fltr_info *f_info)
3112 {
3113         struct ice_switch_info *sw = hw->switch_info;
3114         struct ice_fltr_mgmt_list_entry *list_itr;
3115         struct list_head *list_head;
3116
3117         list_head = &sw->recp_list[recp_id].filt_rules;
3118         list_for_each_entry(list_itr, list_head, list_entry) {
3119                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3120                             sizeof(f_info->l_data)) &&
3121                     f_info->fwd_id.hw_vsi_id ==
3122                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
3123                     f_info->flag == list_itr->fltr_info.flag)
3124                         return list_itr;
3125         }
3126         return NULL;
3127 }
3128
3129 /**
3130  * ice_remove_mac - remove a MAC address based filter rule
3131  * @hw: pointer to the hardware structure
3132  * @m_list: list of MAC addresses and forwarding information
3133  *
3134  * This function removes either a MAC filter rule or a specific VSI from a
3135  * VSI list for a multicast MAC address.
3136  *
3137  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3138  * be aware that this call will only work if all the entries passed into m_list
3139  * were added previously. It will not attempt to do a partial remove of entries
3140  * that were found.
3141  */
3142 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3143 {
3144         struct ice_fltr_list_entry *list_itr, *tmp;
3145         struct mutex *rule_lock; /* Lock to protect filter rule list */
3146
3147         if (!m_list)
3148                 return -EINVAL;
3149
3150         rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3151         list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3152                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3153                 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3154                 u16 vsi_handle;
3155
3156                 if (l_type != ICE_SW_LKUP_MAC)
3157                         return -EINVAL;
3158
3159                 vsi_handle = list_itr->fltr_info.vsi_handle;
3160                 if (!ice_is_vsi_valid(hw, vsi_handle))
3161                         return -EINVAL;
3162
3163                 list_itr->fltr_info.fwd_id.hw_vsi_id =
3164                                         ice_get_hw_vsi_num(hw, vsi_handle);
3165                 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3166                         /* Don't remove the unicast address that belongs to
3167                          * another VSI on the switch, since it is not being
3168                          * shared...
3169                          */
3170                         mutex_lock(rule_lock);
3171                         if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3172                                                        &list_itr->fltr_info)) {
3173                                 mutex_unlock(rule_lock);
3174                                 return -ENOENT;
3175                         }
3176                         mutex_unlock(rule_lock);
3177                 }
3178                 list_itr->status = ice_remove_rule_internal(hw,
3179                                                             ICE_SW_LKUP_MAC,
3180                                                             list_itr);
3181                 if (list_itr->status)
3182                         return list_itr->status;
3183         }
3184         return 0;
3185 }
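
/* Usage sketch (illustrative only, not part of the driver): removal takes the
 * same kind of list that was used when the filters were added, so the caller
 * rebuilds (or reuses) those entries. The mac variable is assumed to be the
 * caller's six-byte address; hw and vsi_handle come from the caller as well.
 *
 *      struct ice_fltr_list_entry *entry;
 *      LIST_HEAD(mac_list);
 *      int err;
 *
 *      entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *      if (!entry)
 *              return -ENOMEM;
 *      entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *      entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *      entry->fltr_info.vsi_handle = vsi_handle;
 *      ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);
 *      list_add(&entry->list_entry, &mac_list);
 *
 *      err = ice_remove_mac(hw, &mac_list);
 */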
3186
3187 /**
3188  * ice_remove_vlan - Remove VLAN based filter rule
3189  * @hw: pointer to the hardware structure
3190  * @v_list: list of VLAN entries and forwarding information
3191  */
3192 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3193 {
3194         struct ice_fltr_list_entry *v_list_itr, *tmp;
3195
3196         if (!v_list || !hw)
3197                 return -EINVAL;
3198
3199         list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3200                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3201
3202                 if (l_type != ICE_SW_LKUP_VLAN)
3203                         return -EINVAL;
3204                 v_list_itr->status = ice_remove_rule_internal(hw,
3205                                                               ICE_SW_LKUP_VLAN,
3206                                                               v_list_itr);
3207                 if (v_list_itr->status)
3208                         return v_list_itr->status;
3209         }
3210         return 0;
3211 }
3212
3213 /**
3214  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3215  * @fm_entry: filter entry to inspect
3216  * @vsi_handle: VSI handle to compare with filter info
3217  */
3218 static bool
3219 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3220 {
3221         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3222                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3223                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3224                  fm_entry->vsi_list_info &&
3225                  (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3226 }
3227
3228 /**
3229  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3230  * @hw: pointer to the hardware structure
3231  * @vsi_handle: VSI handle to remove filters from
3232  * @vsi_list_head: pointer to the list to add entry to
3233  * @fi: pointer to fltr_info of filter entry to copy & add
3234  *
3235  * Helper function, used when creating a list of filters to remove from
3236  * a specific VSI. The entry added to vsi_list_head is a COPY of the
3237  * original filter entry, with the exception of fltr_info.fltr_act and
3238  * fltr_info.fwd_id fields. These are set such that later logic can
3239  * extract which VSI to remove the filter from, and pass on that information.
3240  */
3241 static int
3242 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3243                                struct list_head *vsi_list_head,
3244                                struct ice_fltr_info *fi)
3245 {
3246         struct ice_fltr_list_entry *tmp;
3247
3248         /* this memory is freed up in the caller function
3249          * once filters for this VSI are removed
3250          */
3251         tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
3252         if (!tmp)
3253                 return -ENOMEM;
3254
3255         tmp->fltr_info = *fi;
3256
3257         /* Overwrite these fields to indicate which VSI to remove filter from,
3258          * so find and remove logic can extract the information from the
3259          * list entries. Note that original entries will still have proper
3260          * values.
3261          */
3262         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3263         tmp->fltr_info.vsi_handle = vsi_handle;
3264         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3265
3266         list_add(&tmp->list_entry, vsi_list_head);
3267
3268         return 0;
3269 }
3270
3271 /**
3272  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3273  * @hw: pointer to the hardware structure
3274  * @vsi_handle: VSI handle to remove filters from
3275  * @lkup_list_head: pointer to the list that has certain lookup type filters
3276  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3277  *
3278  * Locates all filters in lkup_list_head that are used by the given VSI,
3279  * and adds COPIES of those entries to vsi_list_head (intended to be used
3280  * to remove the listed filters).
3281  * Note that this means all entries in vsi_list_head must be explicitly
3282  * deallocated by the caller when done with the list.
3283  */
3284 static int
3285 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3286                          struct list_head *lkup_list_head,
3287                          struct list_head *vsi_list_head)
3288 {
3289         struct ice_fltr_mgmt_list_entry *fm_entry;
3290         int status = 0;
3291
3292         /* check to make sure VSI ID is valid and within boundary */
3293         if (!ice_is_vsi_valid(hw, vsi_handle))
3294                 return -EINVAL;
3295
3296         list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
3297                 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
3298                         continue;
3299
3300                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3301                                                         vsi_list_head,
3302                                                         &fm_entry->fltr_info);
3303                 if (status)
3304                         return status;
3305         }
3306         return status;
3307 }
3308
3309 /**
3310  * ice_determine_promisc_mask - determine the promiscuous mask for a filter
3311  * @fi: filter info to parse
3312  *
3313  * Helper function to determine which ICE_PROMISC_ mask corresponds
3314  * to the given filter info.
3315  */
3316 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3317 {
3318         u16 vid = fi->l_data.mac_vlan.vlan_id;
3319         u8 *macaddr = fi->l_data.mac.mac_addr;
3320         bool is_tx_fltr = false;
3321         u8 promisc_mask = 0;
3322
3323         if (fi->flag == ICE_FLTR_TX)
3324                 is_tx_fltr = true;
3325
3326         if (is_broadcast_ether_addr(macaddr))
3327                 promisc_mask |= is_tx_fltr ?
3328                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3329         else if (is_multicast_ether_addr(macaddr))
3330                 promisc_mask |= is_tx_fltr ?
3331                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3332         else if (is_unicast_ether_addr(macaddr))
3333                 promisc_mask |= is_tx_fltr ?
3334                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3335         if (vid)
3336                 promisc_mask |= is_tx_fltr ?
3337                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3338
3339         return promisc_mask;
3340 }
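
/* For example (illustrative only): a Tx filter whose DA is the broadcast
 * address and whose VLAN ID is non-zero maps to
 * (ICE_PROMISC_BCAST_TX | ICE_PROMISC_VLAN_TX), while an Rx filter on a
 * unicast DA with VLAN ID 0 maps to ICE_PROMISC_UCAST_RX alone.
 */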
3341
3342 /**
3343  * ice_remove_promisc - Remove promisc based filter rules
3344  * @hw: pointer to the hardware structure
3345  * @recp_id: recipe ID for which the rule needs to be removed
3346  * @v_list: list of promisc entries
3347  */
3348 static int
3349 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
3350 {
3351         struct ice_fltr_list_entry *v_list_itr, *tmp;
3352
3353         list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3354                 v_list_itr->status =
3355                         ice_remove_rule_internal(hw, recp_id, v_list_itr);
3356                 if (v_list_itr->status)
3357                         return v_list_itr->status;
3358         }
3359         return 0;
3360 }
3361
3362 /**
3363  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3364  * @hw: pointer to the hardware structure
3365  * @vsi_handle: VSI handle to clear mode
3366  * @promisc_mask: mask of promiscuous config bits to clear
3367  * @vid: VLAN ID to clear VLAN promiscuous
3368  */
3369 int
3370 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3371                       u16 vid)
3372 {
3373         struct ice_switch_info *sw = hw->switch_info;
3374         struct ice_fltr_list_entry *fm_entry, *tmp;
3375         struct list_head remove_list_head;
3376         struct ice_fltr_mgmt_list_entry *itr;
3377         struct list_head *rule_head;
3378         struct mutex *rule_lock;        /* Lock to protect filter rule list */
3379         int status = 0;
3380         u8 recipe_id;
3381
3382         if (!ice_is_vsi_valid(hw, vsi_handle))
3383                 return -EINVAL;
3384
3385         if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3386                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3387         else
3388                 recipe_id = ICE_SW_LKUP_PROMISC;
3389
3390         rule_head = &sw->recp_list[recipe_id].filt_rules;
3391         rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3392
3393         INIT_LIST_HEAD(&remove_list_head);
3394
3395         mutex_lock(rule_lock);
3396         list_for_each_entry(itr, rule_head, list_entry) {
3397                 struct ice_fltr_info *fltr_info;
3398                 u8 fltr_promisc_mask = 0;
3399
3400                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3401                         continue;
3402                 fltr_info = &itr->fltr_info;
3403
3404                 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3405                     vid != fltr_info->l_data.mac_vlan.vlan_id)
3406                         continue;
3407
3408                 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3409
3410                 /* Skip if filter is not completely specified by given mask */
3411                 if (fltr_promisc_mask & ~promisc_mask)
3412                         continue;
3413
3414                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3415                                                         &remove_list_head,
3416                                                         fltr_info);
3417                 if (status) {
3418                         mutex_unlock(rule_lock);
3419                         goto free_fltr_list;
3420                 }
3421         }
3422         mutex_unlock(rule_lock);
3423
3424         status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3425
3426 free_fltr_list:
3427         list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3428                 list_del(&fm_entry->list_entry);
3429                 devm_kfree(ice_hw_to_dev(hw), fm_entry);
3430         }
3431
3432         return status;
3433 }
3434
3435 /**
3436  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3437  * @hw: pointer to the hardware structure
3438  * @vsi_handle: VSI handle to configure
3439  * @promisc_mask: mask of promiscuous config bits
3440  * @vid: VLAN ID to set VLAN promiscuous
3441  */
3442 int
3443 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3444 {
3445         enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3446         struct ice_fltr_list_entry f_list_entry;
3447         struct ice_fltr_info new_fltr;
3448         bool is_tx_fltr;
3449         int status = 0;
3450         u16 hw_vsi_id;
3451         int pkt_type;
3452         u8 recipe_id;
3453
3454         if (!ice_is_vsi_valid(hw, vsi_handle))
3455                 return -EINVAL;
3456         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3457
3458         memset(&new_fltr, 0, sizeof(new_fltr));
3459
3460         if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3461                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3462                 new_fltr.l_data.mac_vlan.vlan_id = vid;
3463                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3464         } else {
3465                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3466                 recipe_id = ICE_SW_LKUP_PROMISC;
3467         }
3468
3469         /* Separate filters must be set for each direction/packet type
3470          * combination, so we will loop over the mask value, store the
3471          * individual type, and clear it out in the input mask as it
3472          * is found.
3473          */
3474         while (promisc_mask) {
3475                 u8 *mac_addr;
3476
3477                 pkt_type = 0;
3478                 is_tx_fltr = false;
3479
3480                 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3481                         promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3482                         pkt_type = UCAST_FLTR;
3483                 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3484                         promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3485                         pkt_type = UCAST_FLTR;
3486                         is_tx_fltr = true;
3487                 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3488                         promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3489                         pkt_type = MCAST_FLTR;
3490                 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3491                         promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3492                         pkt_type = MCAST_FLTR;
3493                         is_tx_fltr = true;
3494                 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3495                         promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3496                         pkt_type = BCAST_FLTR;
3497                 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3498                         promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3499                         pkt_type = BCAST_FLTR;
3500                         is_tx_fltr = true;
3501                 }
3502
3503                 /* Check for VLAN promiscuous flag */
3504                 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3505                         promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3506                 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3507                         promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3508                         is_tx_fltr = true;
3509                 }
3510
3511                 /* Set filter DA based on packet type */
3512                 mac_addr = new_fltr.l_data.mac.mac_addr;
3513                 if (pkt_type == BCAST_FLTR) {
3514                         eth_broadcast_addr(mac_addr);
3515                 } else if (pkt_type == MCAST_FLTR ||
3516                            pkt_type == UCAST_FLTR) {
3517                         /* Use the dummy ether header DA */
3518                         ether_addr_copy(mac_addr, dummy_eth_header);
3519                         if (pkt_type == MCAST_FLTR)
3520                                 mac_addr[0] |= 0x1;     /* Set multicast bit */
3521                 }
3522
3523                 /* Need to reset this to zero for all iterations */
3524                 new_fltr.flag = 0;
3525                 if (is_tx_fltr) {
3526                         new_fltr.flag |= ICE_FLTR_TX;
3527                         new_fltr.src = hw_vsi_id;
3528                 } else {
3529                         new_fltr.flag |= ICE_FLTR_RX;
3530                         new_fltr.src = hw->port_info->lport;
3531                 }
3532
3533                 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3534                 new_fltr.vsi_handle = vsi_handle;
3535                 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3536                 f_list_entry.fltr_info = new_fltr;
3537
3538                 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3539                 if (status)
3540                         goto set_promisc_exit;
3541         }
3542
3543 set_promisc_exit:
3544         return status;
3545 }
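
/* Usage sketch (illustrative only, not part of the driver): enable unicast and
 * multicast promiscuous reception on a VSI for untagged traffic (VLAN ID 0),
 * then clear it again with the matching mask. hw and vsi_handle are assumed to
 * come from the caller.
 *
 *      u8 mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;
 *
 *      err = ice_set_vsi_promisc(hw, vsi_handle, mask, 0);
 *      ...
 *      err = ice_clear_vsi_promisc(hw, vsi_handle, mask, 0);
 */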
3546
3547 /**
3548  * ice_set_vlan_vsi_promisc - set/clear promiscuous mode(s) on a VSI's VLANs
3549  * @hw: pointer to the hardware structure
3550  * @vsi_handle: VSI handle to configure
3551  * @promisc_mask: mask of promiscuous config bits
3552  * @rm_vlan_promisc: true to clear the given promiscuous mode(s), false to set them
3553  *
3554  * Configure the VSI, with all its associated VLANs, to the given promiscuous mode(s)
3555  */
3556 int
3557 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3558                          bool rm_vlan_promisc)
3559 {
3560         struct ice_switch_info *sw = hw->switch_info;
3561         struct ice_fltr_list_entry *list_itr, *tmp;
3562         struct list_head vsi_list_head;
3563         struct list_head *vlan_head;
3564         struct mutex *vlan_lock; /* Lock to protect filter rule list */
3565         u16 vlan_id;
3566         int status;
3567
3568         INIT_LIST_HEAD(&vsi_list_head);
3569         vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3570         vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3571         mutex_lock(vlan_lock);
3572         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3573                                           &vsi_list_head);
3574         mutex_unlock(vlan_lock);
3575         if (status)
3576                 goto free_fltr_list;
3577
3578         list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
3579                 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3580                 if (rm_vlan_promisc)
3581                         status = ice_clear_vsi_promisc(hw, vsi_handle,
3582                                                        promisc_mask, vlan_id);
3583                 else
3584                         status = ice_set_vsi_promisc(hw, vsi_handle,
3585                                                      promisc_mask, vlan_id);
3586                 if (status)
3587                         break;
3588         }
3589
3590 free_fltr_list:
3591         list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
3592                 list_del(&list_itr->list_entry);
3593                 devm_kfree(ice_hw_to_dev(hw), list_itr);
3594         }
3595         return status;
3596 }
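
/* Usage sketch (illustrative only, not part of the driver): apply the same
 * promiscuous mask to every VLAN currently configured on the VSI, rather than
 * to a single VLAN ID as above.
 *
 *      err = ice_set_vlan_vsi_promisc(hw, vsi_handle,
 *                                     ICE_PROMISC_UCAST_RX |
 *                                     ICE_PROMISC_MCAST_RX, false);
 */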
3597
3598 /**
3599  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3600  * @hw: pointer to the hardware structure
3601  * @vsi_handle: VSI handle to remove filters from
3602  * @lkup: switch rule filter lookup type
3603  */
3604 static void
3605 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3606                          enum ice_sw_lkup_type lkup)
3607 {
3608         struct ice_switch_info *sw = hw->switch_info;
3609         struct ice_fltr_list_entry *fm_entry;
3610         struct list_head remove_list_head;
3611         struct list_head *rule_head;
3612         struct ice_fltr_list_entry *tmp;
3613         struct mutex *rule_lock;        /* Lock to protect filter rule list */
3614         int status;
3615
3616         INIT_LIST_HEAD(&remove_list_head);
3617         rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3618         rule_head = &sw->recp_list[lkup].filt_rules;
3619         mutex_lock(rule_lock);
3620         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3621                                           &remove_list_head);
3622         mutex_unlock(rule_lock);
3623         if (status)
3624                 goto free_fltr_list;
3625
3626         switch (lkup) {
3627         case ICE_SW_LKUP_MAC:
3628                 ice_remove_mac(hw, &remove_list_head);
3629                 break;
3630         case ICE_SW_LKUP_VLAN:
3631                 ice_remove_vlan(hw, &remove_list_head);
3632                 break;
3633         case ICE_SW_LKUP_PROMISC:
3634         case ICE_SW_LKUP_PROMISC_VLAN:
3635                 ice_remove_promisc(hw, lkup, &remove_list_head);
3636                 break;
3637         case ICE_SW_LKUP_MAC_VLAN:
3638         case ICE_SW_LKUP_ETHERTYPE:
3639         case ICE_SW_LKUP_ETHERTYPE_MAC:
3640         case ICE_SW_LKUP_DFLT:
3641         case ICE_SW_LKUP_LAST:
3642         default:
3643                 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
3644                 break;
3645         }
3646
3647 free_fltr_list:
3648         list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3649                 list_del(&fm_entry->list_entry);
3650                 devm_kfree(ice_hw_to_dev(hw), fm_entry);
3651         }
3652 }
3653
3654 /**
3655  * ice_remove_vsi_fltr - Remove all filters for a VSI
3656  * @hw: pointer to the hardware structure
3657  * @vsi_handle: VSI handle to remove filters from
3658  */
3659 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3660 {
3661         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3662         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3663         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3664         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3665         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3666         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3667         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3668         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3669 }
3670
3671 /**
3672  * ice_alloc_res_cntr - allocate a resource counter
3673  * @hw: pointer to the hardware structure
3674  * @type: type of resource
3675  * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
3676  * @num_items: number of entries requested for FD resource type
3677  * @counter_id: counter index returned by AQ call
3678  */
3679 int
3680 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3681                    u16 *counter_id)
3682 {
3683         struct ice_aqc_alloc_free_res_elem *buf;
3684         u16 buf_len;
3685         int status;
3686
3687         /* Allocate resource */
3688         buf_len = struct_size(buf, elem, 1);
3689         buf = kzalloc(buf_len, GFP_KERNEL);
3690         if (!buf)
3691                 return -ENOMEM;
3692
3693         buf->num_elems = cpu_to_le16(num_items);
3694         buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
3695                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
3696
3697         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3698                                        ice_aqc_opc_alloc_res, NULL);
3699         if (status)
3700                 goto exit;
3701
3702         *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
3703
3704 exit:
3705         kfree(buf);
3706         return status;
3707 }
3708
3709 /**
3710  * ice_free_res_cntr - free resource counter
3711  * @hw: pointer to the hardware structure
3712  * @type: type of resource
3713  * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
3714  * @num_items: number of entries to be freed for FD resource type
3715  * @counter_id: counter ID resource which needs to be freed
3716  */
3717 int
3718 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3719                   u16 counter_id)
3720 {
3721         struct ice_aqc_alloc_free_res_elem *buf;
3722         u16 buf_len;
3723         int status;
3724
3725         /* Free resource */
3726         buf_len = struct_size(buf, elem, 1);
3727         buf = kzalloc(buf_len, GFP_KERNEL);
3728         if (!buf)
3729                 return -ENOMEM;
3730
3731         buf->num_elems = cpu_to_le16(num_items);
3732         buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
3733                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
3734         buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
3735
3736         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3737                                        ice_aqc_opc_free_res, NULL);
3738         if (status)
3739                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
3740
3741         kfree(buf);
3742         return status;
3743 }
3744
3745 /* This mapping table maps every word within a given protocol structure to
3746  * its real byte offset as per the specification of that protocol header.
3747  * For example, the destination address occupies 3 words in the Ethernet
3748  * header, at byte offsets 0, 2 and 4 in the actual packet header, and the
3749  * source address follows at byte offsets 6, 8 and 10.
3750  * IMPORTANT: Every structure that is part of the "ice_prot_hdr" union should
3751  * have a matching entry describing its fields. This needs to be updated if a
3752  * new structure is added to that union.
3753  */
3754 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
3755         { ICE_MAC_OFOS,         { 0, 2, 4, 6, 8, 10, 12 } },
3756         { ICE_MAC_IL,           { 0, 2, 4, 6, 8, 10, 12 } },
3757         { ICE_ETYPE_OL,         { 0 } },
3758         { ICE_VLAN_OFOS,        { 2, 0 } },
3759         { ICE_IPV4_OFOS,        { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
3760         { ICE_IPV4_IL,          { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
3761         { ICE_IPV6_OFOS,        { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
3762                                  26, 28, 30, 32, 34, 36, 38 } },
3763         { ICE_IPV6_IL,          { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
3764                                  26, 28, 30, 32, 34, 36, 38 } },
3765         { ICE_TCP_IL,           { 0, 2 } },
3766         { ICE_UDP_OF,           { 0, 2 } },
3767         { ICE_UDP_ILOS,         { 0, 2 } },
3768         { ICE_VXLAN,            { 8, 10, 12, 14 } },
3769         { ICE_GENEVE,           { 8, 10, 12, 14 } },
3770         { ICE_NVGRE,            { 0, 2, 4, 6 } },
3771 };
3772
3773 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
3774         { ICE_MAC_OFOS,         ICE_MAC_OFOS_HW },
3775         { ICE_MAC_IL,           ICE_MAC_IL_HW },
3776         { ICE_ETYPE_OL,         ICE_ETYPE_OL_HW },
3777         { ICE_VLAN_OFOS,        ICE_VLAN_OL_HW },
3778         { ICE_IPV4_OFOS,        ICE_IPV4_OFOS_HW },
3779         { ICE_IPV4_IL,          ICE_IPV4_IL_HW },
3780         { ICE_IPV6_OFOS,        ICE_IPV6_OFOS_HW },
3781         { ICE_IPV6_IL,          ICE_IPV6_IL_HW },
3782         { ICE_TCP_IL,           ICE_TCP_IL_HW },
3783         { ICE_UDP_OF,           ICE_UDP_OF_HW },
3784         { ICE_UDP_ILOS,         ICE_UDP_ILOS_HW },
3785         { ICE_VXLAN,            ICE_UDP_OF_HW },
3786         { ICE_GENEVE,           ICE_UDP_OF_HW },
3787         { ICE_NVGRE,            ICE_GRE_OF_HW },
3788 };
3789
3790 /**
3791  * ice_find_recp - find a recipe
3792  * @hw: pointer to the hardware structure
3793  * @lkup_exts: extension sequence to match
3794  * @tun_type: type of recipe tunnel
3795  *
3796  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
3797  */
3798 static u16
3799 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
3800               enum ice_sw_tunnel_type tun_type)
3801 {
3802         bool refresh_required = true;
3803         struct ice_sw_recipe *recp;
3804         u8 i;
3805
3806         /* Walk through existing recipes to find a match */
3807         recp = hw->switch_info->recp_list;
3808         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3809                 /* If recipe was not created for this ID, in SW bookkeeping,
3810                  * check if FW has an entry for this recipe. If the FW has an
3811                  * entry, update it in our SW bookkeeping and continue with the
3812                  * matching.
3813                  */
3814                 if (!recp[i].recp_created)
3815                         if (ice_get_recp_frm_fw(hw,
3816                                                 hw->switch_info->recp_list, i,
3817                                                 &refresh_required))
3818                                 continue;
3819
3820                 /* Skip inverse action recipes */
3821                 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
3822                     ICE_AQ_RECIPE_ACT_INV_ACT)
3823                         continue;
3824
3825                 /* if the number of words we are looking for matches */
3826                 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
3827                         struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
3828                         struct ice_fv_word *be = lkup_exts->fv_words;
3829                         u16 *cr = recp[i].lkup_exts.field_mask;
3830                         u16 *de = lkup_exts->field_mask;
3831                         bool found = true;
3832                         u8 pe, qr;
3833
3834                         /* ar, cr, and qr are related to the recipe words, while
3835                          * be, de, and pe are related to the lookup words
3836                          */
3837                         for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
3838                                 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
3839                                      qr++) {
3840                                         if (ar[qr].off == be[pe].off &&
3841                                             ar[qr].prot_id == be[pe].prot_id &&
3842                                             cr[qr] == de[pe])
3843                                                 /* Found the "pe"th word in the
3844                                                  * given recipe
3845                                                  */
3846                                                 break;
3847                                 }
3848                                 /* After walking through all the words in the
3849                                  * "i"th recipe, if the "pe"th word was not found, then
3850                                  * this recipe is not what we are looking for.
3851                                  * So break out from this loop and try the next
3852                                  * recipe
3853                                  */
3854                                 if (qr >= recp[i].lkup_exts.n_val_words) {
3855                                         found = false;
3856                                         break;
3857                                 }
3858                         }
3859                         /* If "found" was never set to false for the "i"th
3860                          * recipe, it means we found our match.
3861                          * The tunnel type of the recipe also needs to match.
3862                          */
3863                         if (found && recp[i].tun_type == tun_type)
3864                                 return i; /* Return the recipe ID */
3865                 }
3866         }
3867         return ICE_MAX_NUM_RECIPES;
3868 }
3869
3870 /**
3871  * ice_prot_type_to_id - get protocol ID from protocol type
3872  * @type: protocol type
3873  * @id: pointer to variable that will receive the ID
3874  *
3875  * Returns true if found, false otherwise
3876  */
3877 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
3878 {
3879         u8 i;
3880
3881         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
3882                 if (ice_prot_id_tbl[i].type == type) {
3883                         *id = ice_prot_id_tbl[i].protocol_id;
3884                         return true;
3885                 }
3886         return false;
3887 }
3888
3889 /**
3890  * ice_fill_valid_words - count valid words
3891  * @rule: advanced rule with lookup information
3892  * @lkup_exts: byte offset extractions of the words that are valid
3893  *
3894  * calculate valid words in a lookup rule using mask value
3895  */
3896 static u8
3897 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
3898                      struct ice_prot_lkup_ext *lkup_exts)
3899 {
3900         u8 j, word, prot_id, ret_val;
3901
3902         if (!ice_prot_type_to_id(rule->type, &prot_id))
3903                 return 0;
3904
3905         word = lkup_exts->n_val_words;
3906
3907         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
3908                 if (((u16 *)&rule->m_u)[j] &&
3909                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
3910                         /* No more space to accommodate */
3911                         if (word >= ICE_MAX_CHAIN_WORDS)
3912                                 return 0;
3913                         lkup_exts->fv_words[word].off =
3914                                 ice_prot_ext[rule->type].offs[j];
3915                         lkup_exts->fv_words[word].prot_id =
3916                                 ice_prot_id_tbl[rule->type].protocol_id;
3917                         lkup_exts->field_mask[word] =
3918                                 be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
3919                         word++;
3920                 }
3921
3922         ret_val = word - lkup_exts->n_val_words;
3923         lkup_exts->n_val_words = word;
3924
3925         return ret_val;
3926 }
3927
3928 /**
3929  * ice_create_first_fit_recp_def - Create a recipe grouping
3930  * @hw: pointer to the hardware structure
3931  * @lkup_exts: an array of protocol header extractions
3932  * @rg_list: pointer to a list that stores new recipe groups
3933  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
3934  *
3935  * Using a first-fit algorithm, take all the words that are still not done
3936  * and start grouping them in 4-word groups. Each group makes up one
3937  * recipe.
3938  */
3939 static int
3940 ice_create_first_fit_recp_def(struct ice_hw *hw,
3941                               struct ice_prot_lkup_ext *lkup_exts,
3942                               struct list_head *rg_list,
3943                               u8 *recp_cnt)
3944 {
3945         struct ice_pref_recipe_group *grp = NULL;
3946         u8 j;
3947
3948         *recp_cnt = 0;
3949
3950         /* Walk through every word in the rule and check whether it is done.
3951          * If it is not, the word needs to be part of a new recipe.
3952          */
3953         for (j = 0; j < lkup_exts->n_val_words; j++)
3954                 if (!test_bit(j, lkup_exts->done)) {
3955                         if (!grp ||
3956                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
3957                                 struct ice_recp_grp_entry *entry;
3958
3959                                 entry = devm_kzalloc(ice_hw_to_dev(hw),
3960                                                      sizeof(*entry),
3961                                                      GFP_KERNEL);
3962                                 if (!entry)
3963                                         return -ENOMEM;
3964                                 list_add(&entry->l_entry, rg_list);
3965                                 grp = &entry->r_group;
3966                                 (*recp_cnt)++;
3967                         }
3968
3969                         grp->pairs[grp->n_val_pairs].prot_id =
3970                                 lkup_exts->fv_words[j].prot_id;
3971                         grp->pairs[grp->n_val_pairs].off =
3972                                 lkup_exts->fv_words[j].off;
3973                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
3974                         grp->n_val_pairs++;
3975                 }
3976
3977         return 0;
3978 }
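
/* For example (illustrative only): if a rule extracts six valid words and
 * ICE_NUM_WORDS_RECIPE is four, the first-fit pass above produces two recipe
 * groups, one holding words 0-3 and a second holding the remaining words 4-5,
 * and *recp_cnt is set to 2.
 */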
3979
3980 /**
3981  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
3982  * @hw: pointer to the hardware structure
3983  * @fv_list: field vector with the extraction sequence information
3984  * @rg_list: recipe groupings with protocol-offset pairs
3985  *
3986  * Helper function to fill in the field vector indices for protocol-offset
3987  * pairs. These indexes are then ultimately programmed into a recipe.
3988  */
3989 static int
3990 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
3991                        struct list_head *rg_list)
3992 {
3993         struct ice_sw_fv_list_entry *fv;
3994         struct ice_recp_grp_entry *rg;
3995         struct ice_fv_word *fv_ext;
3996
3997         if (list_empty(fv_list))
3998                 return 0;
3999
4000         fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4001                               list_entry);
4002         fv_ext = fv->fv_ptr->ew;
4003
4004         list_for_each_entry(rg, rg_list, l_entry) {
4005                 u8 i;
4006
4007                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4008                         struct ice_fv_word *pr;
4009                         bool found = false;
4010                         u16 mask;
4011                         u8 j;
4012
4013                         pr = &rg->r_group.pairs[i];
4014                         mask = rg->r_group.mask[i];
4015
4016                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4017                                 if (fv_ext[j].prot_id == pr->prot_id &&
4018                                     fv_ext[j].off == pr->off) {
4019                                         found = true;
4020
4021                                         /* Store index of field vector */
4022                                         rg->fv_idx[i] = j;
4023                                         rg->fv_mask[i] = mask;
4024                                         break;
4025                                 }
4026
4027                         /* Protocol/offset could not be found, caller gave an
4028                          * invalid pair
4029                          */
4030                         if (!found)
4031                                 return -EINVAL;
4032                 }
4033         }
4034
4035         return 0;
4036 }
4037
4038 /**
4039  * ice_find_free_recp_res_idx - find free result indexes for recipe
4040  * @hw: pointer to hardware structure
4041  * @profiles: bitmap of profiles that will be associated with the new recipe
4042  * @free_idx: pointer to variable to receive the free index bitmap
4043  *
4044  * The algorithm used here is:
4045  *      1. When creating a new recipe, create a set P which contains all
4046  *         Profiles that will be associated with our new recipe
4047  *
4048  *      2. For each Profile p in set P:
4049  *          a. Add all recipes associated with Profile p into set R
4050  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4051  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4052  *              i. Or just assume they all have the same possible indexes:
4053  *                      44, 45, 46, 47
4054  *                      i.e., PossibleIndexes = 0x0000F00000000000
4055  *
4056  *      3. For each Recipe r in set R:
4057  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4058  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4059  *
4060  *      FreeIndexes will contain the bits indicating the indexes free for use,
4061  *      then the code needs to update the recipe[r].used_result_idx_bits to
4062  *      indicate which indexes were selected for use by this recipe.
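 *
 *      Worked example (hypothetical values): if the profiles in P allow
 *      result indexes 44-47 (PossibleIndexes = 0x0000F00000000000) and the
 *      recipes in R already use indexes 44 and 45
 *      (UsedIndexes = 0x0000300000000000), then
 *      FreeIndexes = 0x0000C00000000000, i.e. indexes 46 and 47 are free
 *      and this function returns 2.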
4063  */
4064 static u16
4065 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4066                            unsigned long *free_idx)
4067 {
4068         DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4069         DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4070         DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4071         u16 bit;
4072
4073         bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4074         bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4075
4076         bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
4077
4078         /* For each profile we are going to associate the recipe with, add the
4079          * recipes that are associated with that profile. This will give us
4080          * the set of recipes that our recipe may collide with. Also, determine
4081          * what possible result indexes are usable given this set of profiles.
4082          */
4083         for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4084                 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4085                           ICE_MAX_NUM_RECIPES);
4086                 bitmap_and(possible_idx, possible_idx,
4087                            hw->switch_info->prof_res_bm[bit],
4088                            ICE_MAX_FV_WORDS);
4089         }
4090
4091         /* For each recipe that our new recipe may collide with, determine
4092          * which indexes have been used.
4093          */
4094         for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4095                 bitmap_or(used_idx, used_idx,
4096                           hw->switch_info->recp_list[bit].res_idxs,
4097                           ICE_MAX_FV_WORDS);
4098
4099         bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4100
4101         /* return number of free indexes */
4102         return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4103 }
4104
4105 /**
4106  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4107  * @hw: pointer to hardware structure
4108  * @rm: recipe management list entry
4109  * @profiles: bitmap of profiles that will be associated.
4110  */
4111 static int
4112 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4113                   unsigned long *profiles)
4114 {
4115         DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4116         struct ice_aqc_recipe_data_elem *tmp;
4117         struct ice_aqc_recipe_data_elem *buf;
4118         struct ice_recp_grp_entry *entry;
4119         u16 free_res_idx;
4120         u16 recipe_count;
4121         u8 chain_idx;
4122         u8 recps = 0;
4123         int status;
4124
4125         /* When more than one recipe is required, another recipe is needed to
4126          * chain them together. Matching a tunnel metadata ID takes up one of
4127          * the match fields in the chaining recipe, reducing the number of
4128          * chained recipes by one.
4129          */
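         /* For illustration (hypothetical case): if the match words were
          * grouped into two recipes, n_grp_count becomes 3 once the chaining
          * recipe is added below, and ice_find_free_recp_res_idx() must have
          * reported at least 2 free result indexes for the chain to fit.
          */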
4130          /* check number of free result indices */
4131         bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4132         free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4133
4134         ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4135                   free_res_idx, rm->n_grp_count);
4136
4137         if (rm->n_grp_count > 1) {
4138                 if (rm->n_grp_count > free_res_idx)
4139                         return -ENOSPC;
4140
4141                 rm->n_grp_count++;
4142         }
4143
4144         if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4145                 return -ENOSPC;
4146
4147         tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4148         if (!tmp)
4149                 return -ENOMEM;
4150
4151         buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4152                            GFP_KERNEL);
4153         if (!buf) {
4154                 status = -ENOMEM;
4155                 goto err_mem;
4156         }
4157
4158         bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4159         recipe_count = ICE_MAX_NUM_RECIPES;
4160         status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4161                                    NULL);
4162         if (status || recipe_count == 0)
4163                 goto err_unroll;
4164
4165         /* Allocate the recipe resources, and configure them according to the
4166          * match fields from protocol headers and extracted field vectors.
4167          */
4168         chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
4169         list_for_each_entry(entry, &rm->rg_list, l_entry) {
4170                 u8 i;
4171
4172                 status = ice_alloc_recipe(hw, &entry->rid);
4173                 if (status)
4174                         goto err_unroll;
4175
4176                 /* Clear the result index of the located recipe, as this will be
4177                  * updated, if needed, later in the recipe creation process.
4178                  */
4179                 tmp[0].content.result_indx = 0;
4180
4181                 buf[recps] = tmp[0];
4182                 buf[recps].recipe_indx = (u8)entry->rid;
4183                 /* if the recipe is a non-root recipe, RID should be programmed
4184                  * as 0 for the rules to be applied correctly.
4185                  */
4186                 buf[recps].content.rid = 0;
4187                 memset(&buf[recps].content.lkup_indx, 0,
4188                        sizeof(buf[recps].content.lkup_indx));
4189
4190                 /* All recipes use look-up index 0 to match switch ID. */
4191                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4192                 buf[recps].content.mask[0] =
4193                         cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4194                 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4195                  * to be 0
4196                  */
4197                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4198                         buf[recps].content.lkup_indx[i] = 0x80;
4199                         buf[recps].content.mask[i] = 0;
4200                 }
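                /* At this point lkup_indx[0] matches the switch ID and all
                 * other words are marked ignored; the loop below then fills
                 * words 1..n_val_pairs with the field vector indexes and
                 * masks resolved earlier by ice_fill_fv_word_index().
                 */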
4201
4202                 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4203                         buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4204                         buf[recps].content.mask[i + 1] =
4205                                 cpu_to_le16(entry->fv_mask[i]);
4206                 }
4207
4208                 if (rm->n_grp_count > 1) {
4209                         /* Checks to see if there really is a valid result index
4210                          * that can be used.
4211                          */
4212                         if (chain_idx >= ICE_MAX_FV_WORDS) {
4213                                 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
4214                                 status = -ENOSPC;
4215                                 goto err_unroll;
4216                         }
4217
4218                         entry->chain_idx = chain_idx;
4219                         buf[recps].content.result_indx =
4220                                 ICE_AQ_RECIPE_RESULT_EN |
4221                                 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4222                                  ICE_AQ_RECIPE_RESULT_DATA_M);
4223                         clear_bit(chain_idx, result_idx_bm);
4224                         chain_idx = find_first_bit(result_idx_bm,
4225                                                    ICE_MAX_FV_WORDS);
4226                 }
4227
4228                 /* fill recipe dependencies */
4229                 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
4230                             ICE_MAX_NUM_RECIPES);
4231                 set_bit(buf[recps].recipe_indx,
4232                         (unsigned long *)buf[recps].recipe_bitmap);
4233                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4234                 recps++;
4235         }
4236
4237         if (rm->n_grp_count == 1) {
4238                 rm->root_rid = buf[0].recipe_indx;
4239                 set_bit(buf[0].recipe_indx, rm->r_bitmap);
4240                 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4241                 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4242                         memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4243                                sizeof(buf[0].recipe_bitmap));
4244                 } else {
4245                         status = -EINVAL;
4246                         goto err_unroll;
4247                 }
4248                 /* Applicable only for ROOT_RECIPE: set the fwd_priority for
4249                  * the recipe being created if the user specified one. Any
4250                  * advanced switch filter that results in a new extraction
4251                  * sequence ends up creating a new recipe of type ROOT, and
4252                  * recipes are usually associated with profiles. A switch rule
4253                  * referring to the newly created recipe needs either a 'fwd'
4254                  * or a 'join' priority, otherwise switch rule evaluation will
4255                  * not happen correctly. In other words, if the switch rule is
4256                  * to be evaluated on a priority basis, then the recipe needs
4257                  * to have a priority, otherwise it will be evaluated last.
4258                  */
4259                 buf[0].content.act_ctrl_fwd_priority = rm->priority;
4260         } else {
4261                 struct ice_recp_grp_entry *last_chain_entry;
4262                 u16 rid, i;
4263
4264                 /* Allocate the last recipe that will chain the outcomes of the
4265                  * other recipes together
4266                  */
4267                 status = ice_alloc_recipe(hw, &rid);
4268                 if (status)
4269                         goto err_unroll;
4270
4271                 buf[recps].recipe_indx = (u8)rid;
4272                 buf[recps].content.rid = (u8)rid;
4273                 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4274                 /* the new entry created should also be part of rg_list to
4275                  * make sure we have a complete recipe
4276                  */
4277                 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
4278                                                 sizeof(*last_chain_entry),
4279                                                 GFP_KERNEL);
4280                 if (!last_chain_entry) {
4281                         status = -ENOMEM;
4282                         goto err_unroll;
4283                 }
4284                 last_chain_entry->rid = rid;
4285                 memset(&buf[recps].content.lkup_indx, 0,
4286                        sizeof(buf[recps].content.lkup_indx));
4287                 /* All recipes use look-up index 0 to match switch ID. */
4288                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4289                 buf[recps].content.mask[0] =
4290                         cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4291                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4292                         buf[recps].content.lkup_indx[i] =
4293                                 ICE_AQ_RECIPE_LKUP_IGNORE;
4294                         buf[recps].content.mask[i] = 0;
4295                 }
4296
4297                 i = 1;
4298                 /* update r_bitmap with the recp that is used for chaining */
4299                 set_bit(rid, rm->r_bitmap);
4300                 /* this is the recipe that chains all the other recipes, so it
4301                  * should not itself have a chaining index
4302                  */
4303                 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
4304                 list_for_each_entry(entry, &rm->rg_list, l_entry) {
4305                         last_chain_entry->fv_idx[i] = entry->chain_idx;
4306                         buf[recps].content.lkup_indx[i] = entry->chain_idx;
4307                         buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
4308                         set_bit(entry->rid, rm->r_bitmap);
4309                 }
4310                 list_add(&last_chain_entry->l_entry, &rm->rg_list);
4311                 if (sizeof(buf[recps].recipe_bitmap) >=
4312                     sizeof(rm->r_bitmap)) {
4313                         memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4314                                sizeof(buf[recps].recipe_bitmap));
4315                 } else {
4316                         status = -EINVAL;
4317                         goto err_unroll;
4318                 }
4319                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4320
4321                 recps++;
4322                 rm->root_rid = (u8)rid;
4323         }
4324         status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4325         if (status)
4326                 goto err_unroll;
4327
4328         status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4329         ice_release_change_lock(hw);
4330         if (status)
4331                 goto err_unroll;
4332
4333         /* Add every recipe that was just created to the recipe
4334          * bookkeeping list
4335          */
4336         list_for_each_entry(entry, &rm->rg_list, l_entry) {
4337                 struct ice_switch_info *sw = hw->switch_info;
4338                 bool is_root, idx_found = false;
4339                 struct ice_sw_recipe *recp;
4340                 u16 idx, buf_idx = 0;
4341
4342                 /* find buffer index for copying some data */
4343                 for (idx = 0; idx < rm->n_grp_count; idx++)
4344                         if (buf[idx].recipe_indx == entry->rid) {
4345                                 buf_idx = idx;
4346                                 idx_found = true;
4347                         }
4348
4349                 if (!idx_found) {
4350                         status = -EIO;
4351                         goto err_unroll;
4352                 }
4353
4354                 recp = &sw->recp_list[entry->rid];
4355                 is_root = (rm->root_rid == entry->rid);
4356                 recp->is_root = is_root;
4357
4358                 recp->root_rid = entry->rid;
4359                 recp->big_recp = (is_root && rm->n_grp_count > 1);
4360
4361                 memcpy(&recp->ext_words, entry->r_group.pairs,
4362                        entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
4363
4364                 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
4365                        sizeof(recp->r_bitmap));
4366
4367                 /* Copy non-result fv index values and masks to recipe. This
4368                  * call will also update the result recipe bitmask.
4369                  */
4370                 ice_collect_result_idx(&buf[buf_idx], recp);
4371
4372                 /* for non-root recipes, also copy to the root, this allows
4373                  * easier matching of a complete chained recipe
4374                  */
4375                 if (!is_root)
4376                         ice_collect_result_idx(&buf[buf_idx],
4377                                                &sw->recp_list[rm->root_rid]);
4378
4379                 recp->n_ext_words = entry->r_group.n_val_pairs;
4380                 recp->chain_idx = entry->chain_idx;
4381                 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
4382                 recp->n_grp_count = rm->n_grp_count;
4383                 recp->tun_type = rm->tun_type;
4384                 recp->recp_created = true;
4385         }
4386         rm->root_buf = buf;
4387         kfree(tmp);
4388         return status;
4389
4390 err_unroll:
4391 err_mem:
4392         kfree(tmp);
4393         devm_kfree(ice_hw_to_dev(hw), buf);
4394         return status;
4395 }
4396
4397 /**
4398  * ice_create_recipe_group - creates recipe group
4399  * @hw: pointer to hardware structure
4400  * @rm: recipe management list entry
4401  * @lkup_exts: lookup elements
4402  */
4403 static int
4404 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4405                         struct ice_prot_lkup_ext *lkup_exts)
4406 {
4407         u8 recp_count = 0;
4408         int status;
4409
4410         rm->n_grp_count = 0;
4411
4412         /* Create recipes for words that are marked not done, packing them
4413          * using a best-fit approach.
4414          */
4415         status = ice_create_first_fit_recp_def(hw, lkup_exts,
4416                                                &rm->rg_list, &recp_count);
4417         if (!status) {
4418                 rm->n_grp_count += recp_count;
4419                 rm->n_ext_words = lkup_exts->n_val_words;
4420                 memcpy(&rm->ext_words, lkup_exts->fv_words,
4421                        sizeof(rm->ext_words));
4422                 memcpy(rm->word_masks, lkup_exts->field_mask,
4423                        sizeof(rm->word_masks));
4424         }
4425
4426         return status;
4427 }
4428
4429 /**
4430  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
4431  * @hw: pointer to hardware structure
4432  * @lkups: lookup elements or match criteria for the advanced recipe, one
4433  *         structure per protocol header
4434  * @lkups_cnt: number of protocols
4435  * @bm: bitmap of field vectors to consider
4436  * @fv_list: pointer to a list that holds the returned field vectors
4437  */
4438 static int
4439 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4440            unsigned long *bm, struct list_head *fv_list)
4441 {
4442         u8 *prot_ids;
4443         int status;
4444         u16 i;
4445
4446         prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
4447         if (!prot_ids)
4448                 return -ENOMEM;
4449
4450         for (i = 0; i < lkups_cnt; i++)
4451                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
4452                         status = -EIO;
4453                         goto free_mem;
4454                 }
4455
4456         /* Find field vectors that include all specified protocol types */
4457         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
4458
4459 free_mem:
4460         kfree(prot_ids);
4461         return status;
4462 }
4463
4464 /**
4465  * ice_tun_type_match_word - determine if tun type needs a match mask
4466  * @tun_type: tunnel type
4467  * @mask: mask to be used for the tunnel
4468  */
4469 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
4470 {
4471         switch (tun_type) {
4472         case ICE_SW_TUN_GENEVE:
4473         case ICE_SW_TUN_VXLAN:
4474         case ICE_SW_TUN_NVGRE:
4475                 *mask = ICE_TUN_FLAG_MASK;
4476                 return true;
4477
4478         default:
4479                 *mask = 0;
4480                 return false;
4481         }
4482 }
4483
4484 /**
4485  * ice_add_special_words - Add words that are not protocols, such as metadata
4486  * @rinfo: other information regarding the rule e.g. priority and action info
4487  * @lkup_exts: lookup word structure
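 *
 * For example, for a VXLAN, GENEVE or NVGRE rule one extra lookup word is
 * appended that matches the tunnel flag in the packet metadata
 * (ICE_META_DATA_ID_HW at ICE_TUN_FLAG_MDID_OFF, masked with
 * ICE_TUN_FLAG_MASK), consuming one of the ICE_MAX_CHAIN_WORDS slots.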
4488  */
4489 static int
4490 ice_add_special_words(struct ice_adv_rule_info *rinfo,
4491                       struct ice_prot_lkup_ext *lkup_exts)
4492 {
4493         u16 mask;
4494
4495         /* If this is a tunneled packet, then add recipe index to match the
4496          * tunnel bit in the packet metadata flags.
4497          */
4498         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
4499                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
4500                         u8 word = lkup_exts->n_val_words++;
4501
4502                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
4503                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
4504                         lkup_exts->field_mask[word] = mask;
4505                 } else {
4506                         return -ENOSPC;
4507                 }
4508         }
4509
4510         return 0;
4511 }
4512
4513 /**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
4514  * @hw: pointer to hardware structure
4515  * @rinfo: other information regarding the rule e.g. priority and action info
4516  * @bm: pointer to memory for returning the bitmap of field vectors
4517  */
4518 static void
4519 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
4520                          unsigned long *bm)
4521 {
4522         enum ice_prof_type prof_type;
4523
4524         bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
4525
4526         switch (rinfo->tun_type) {
4527         case ICE_NON_TUN:
4528                 prof_type = ICE_PROF_NON_TUN;
4529                 break;
4530         case ICE_ALL_TUNNELS:
4531                 prof_type = ICE_PROF_TUN_ALL;
4532                 break;
4533         case ICE_SW_TUN_GENEVE:
4534         case ICE_SW_TUN_VXLAN:
4535                 prof_type = ICE_PROF_TUN_UDP;
4536                 break;
4537         case ICE_SW_TUN_NVGRE:
4538                 prof_type = ICE_PROF_TUN_GRE;
4539                 break;
4540         case ICE_SW_TUN_AND_NON_TUN:
4541         default:
4542                 prof_type = ICE_PROF_ALL;
4543                 break;
4544         }
4545
4546         ice_get_sw_fv_bitmap(hw, prof_type, bm);
4547 }
4548
4549 /**
4550  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
4551  * @hw: pointer to hardware structure
4552  * @lkups: lookup elements or match criteria for the advanced recipe, one
4553  *  structure per protocol header
4554  * @lkups_cnt: number of protocols
4555  * @rinfo: other information regarding the rule e.g. priority and action info
4556  * @rid: return the recipe ID of the recipe created
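 *
 * The recipe is built in stages: compatible field vectors are collected
 * (ice_get_compat_fv_bitmap/ice_get_fv), metadata match words are added
 * (ice_add_special_words), the match words are grouped into recipes
 * (ice_create_recipe_group), field vector indexes are resolved
 * (ice_fill_fv_word_index) and, if no existing recipe already matches the
 * criteria, the new recipe(s) are programmed and associated with the
 * relevant profiles (ice_add_sw_recipe).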
4557  */
4558 static int
4559 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
4560                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
4561 {
4562         DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
4563         DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
4564         struct ice_prot_lkup_ext *lkup_exts;
4565         struct ice_recp_grp_entry *r_entry;
4566         struct ice_sw_fv_list_entry *fvit;
4567         struct ice_recp_grp_entry *r_tmp;
4568         struct ice_sw_fv_list_entry *tmp;
4569         struct ice_sw_recipe *rm;
4570         int status = 0;
4571         u8 i;
4572
4573         if (!lkups_cnt)
4574                 return -EINVAL;
4575
4576         lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
4577         if (!lkup_exts)
4578                 return -ENOMEM;
4579
4580         /* Determine the number of words to be matched and whether it exceeds a
4581          * recipe's restrictions
4582          */
4583         for (i = 0; i < lkups_cnt; i++) {
4584                 u16 count;
4585
4586                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
4587                         status = -EIO;
4588                         goto err_free_lkup_exts;
4589                 }
4590
4591                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
4592                 if (!count) {
4593                         status = -EIO;
4594                         goto err_free_lkup_exts;
4595                 }
4596         }
4597
4598         rm = kzalloc(sizeof(*rm), GFP_KERNEL);
4599         if (!rm) {
4600                 status = -ENOMEM;
4601                 goto err_free_lkup_exts;
4602         }
4603
4604         /* Get field vectors that contain fields extracted from all the protocol
4605          * headers being programmed.
4606          */
4607         INIT_LIST_HEAD(&rm->fv_list);
4608         INIT_LIST_HEAD(&rm->rg_list);
4609
4610         /* Get bitmap of field vectors (profiles) that are compatible with the
4611          * rule request; only these will be searched in the subsequent call to
4612          * ice_get_fv.
4613          */
4614         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
4615
4616         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
4617         if (status)
4618                 goto err_unroll;
4619
4620         /* Create any special protocol/offset pairs, such as looking at tunnel
4621          * bits by extracting metadata
4622          */
4623         status = ice_add_special_words(rinfo, lkup_exts);
4624         if (status)
4625                 goto err_free_lkup_exts;
4626
4627         /* Group match words into recipes using preferred recipe grouping
4628          * criteria.
4629          */
4630         status = ice_create_recipe_group(hw, rm, lkup_exts);
4631         if (status)
4632                 goto err_unroll;
4633
4634         /* set the recipe priority if specified */
4635         rm->priority = (u8)rinfo->priority;
4636
4637         /* Find offsets from the field vector. Pick the first one for all the
4638          * recipes.
4639          */
4640         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
4641         if (status)
4642                 goto err_unroll;
4643
4644         /* get bitmap of all profiles the recipe will be associated with */
4645         bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
4646         list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4647                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
4648                 set_bit((u16)fvit->profile_id, profiles);
4649         }
4650
4651         /* Look for a recipe which matches our requested fv / mask list */
4652         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
4653         if (*rid < ICE_MAX_NUM_RECIPES)
4654                 /* Success if we found a recipe that matches the existing criteria */
4655                 goto err_unroll;
4656
4657         rm->tun_type = rinfo->tun_type;
4658         /* Recipe we need does not exist, add a recipe */
4659         status = ice_add_sw_recipe(hw, rm, profiles);
4660         if (status)
4661                 goto err_unroll;
4662
4663         /* Associate all the recipes created with all the profiles in the
4664          * common field vector.
4665          */
4666         list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4667                 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
4668                 u16 j;
4669
4670                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
4671                                                       (u8 *)r_bitmap, NULL);
4672                 if (status)
4673                         goto err_unroll;
4674
4675                 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
4676                           ICE_MAX_NUM_RECIPES);
4677                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4678                 if (status)
4679                         goto err_unroll;
4680
4681                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
4682                                                       (u8 *)r_bitmap,
4683                                                       NULL);
4684                 ice_release_change_lock(hw);
4685
4686                 if (status)
4687                         goto err_unroll;
4688
4689                 /* Update profile to recipe bitmap array */
4690                 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
4691                             ICE_MAX_NUM_RECIPES);
4692
4693                 /* Update recipe to profile bitmap array */
4694                 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
4695                         set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
4696         }
4697
4698         *rid = rm->root_rid;
4699         memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
4700                sizeof(*lkup_exts));
4701 err_unroll:
4702         list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
4703                 list_del(&r_entry->l_entry);
4704                 devm_kfree(ice_hw_to_dev(hw), r_entry);
4705         }
4706
4707         list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
4708                 list_del(&fvit->list_entry);
4709                 devm_kfree(ice_hw_to_dev(hw), fvit);
4710         }
4711
4712         if (rm->root_buf)
4713                 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
4714
4715         kfree(rm);
4716
4717 err_free_lkup_exts:
4718         kfree(lkup_exts);
4719
4720         return status;
4721 }
4722
4723 /**
4724  * ice_find_dummy_packet - find dummy packet
4725  *
4726  * @lkups: lookup elements or match criteria for the advanced recipe, one
4727  *         structure per protocol header
4728  * @lkups_cnt: number of protocols
4729  * @tun_type: tunnel type
4730  * @pkt: dummy packet to fill according to filter match criteria
4731  * @pkt_len: packet length of dummy packet
4732  * @offsets: pointer to receive the pointer to the offsets for the packet
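 *
 * For example, lookups containing ICE_TCP_IL and ICE_IPV6_OFOS with no VLAN
 * and a tun_type of ICE_NON_TUN select dummy_tcp_ipv6_packet, while an NVGRE
 * tunnel rule with a TCP lookup selects dummy_gre_tcp_packet.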
4733  */
4734 static void
4735 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4736                       enum ice_sw_tunnel_type tun_type,
4737                       const u8 **pkt, u16 *pkt_len,
4738                       const struct ice_dummy_pkt_offsets **offsets)
4739 {
4740         bool tcp = false, udp = false, ipv6 = false, vlan = false;
4741         u16 i;
4742
4743         for (i = 0; i < lkups_cnt; i++) {
4744                 if (lkups[i].type == ICE_UDP_ILOS)
4745                         udp = true;
4746                 else if (lkups[i].type == ICE_TCP_IL)
4747                         tcp = true;
4748                 else if (lkups[i].type == ICE_IPV6_OFOS)
4749                         ipv6 = true;
4750                 else if (lkups[i].type == ICE_VLAN_OFOS)
4751                         vlan = true;
4752                 else if (lkups[i].type == ICE_ETYPE_OL &&
4753                          lkups[i].h_u.ethertype.ethtype_id ==
4754                                 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
4755                          lkups[i].m_u.ethertype.ethtype_id ==
4756                                         cpu_to_be16(0xFFFF))
4757                         ipv6 = true;
4758         }
4759
4760         if (tun_type == ICE_SW_TUN_NVGRE) {
4761                 if (tcp) {
4762                         *pkt = dummy_gre_tcp_packet;
4763                         *pkt_len = sizeof(dummy_gre_tcp_packet);
4764                         *offsets = dummy_gre_tcp_packet_offsets;
4765                         return;
4766                 }
4767
4768                 *pkt = dummy_gre_udp_packet;
4769                 *pkt_len = sizeof(dummy_gre_udp_packet);
4770                 *offsets = dummy_gre_udp_packet_offsets;
4771                 return;
4772         }
4773
4774         if (tun_type == ICE_SW_TUN_VXLAN ||
4775             tun_type == ICE_SW_TUN_GENEVE) {
4776                 if (tcp) {
4777                         *pkt = dummy_udp_tun_tcp_packet;
4778                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
4779                         *offsets = dummy_udp_tun_tcp_packet_offsets;
4780                         return;
4781                 }
4782
4783                 *pkt = dummy_udp_tun_udp_packet;
4784                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
4785                 *offsets = dummy_udp_tun_udp_packet_offsets;
4786                 return;
4787         }
4788
4789         if (udp && !ipv6) {
4790                 if (vlan) {
4791                         *pkt = dummy_vlan_udp_packet;
4792                         *pkt_len = sizeof(dummy_vlan_udp_packet);
4793                         *offsets = dummy_vlan_udp_packet_offsets;
4794                         return;
4795                 }
4796                 *pkt = dummy_udp_packet;
4797                 *pkt_len = sizeof(dummy_udp_packet);
4798                 *offsets = dummy_udp_packet_offsets;
4799                 return;
4800         } else if (udp && ipv6) {
4801                 if (vlan) {
4802                         *pkt = dummy_vlan_udp_ipv6_packet;
4803                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
4804                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
4805                         return;
4806                 }
4807                 *pkt = dummy_udp_ipv6_packet;
4808                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
4809                 *offsets = dummy_udp_ipv6_packet_offsets;
4810                 return;
4811         } else if ((tcp && ipv6) || ipv6) {
4812                 if (vlan) {
4813                         *pkt = dummy_vlan_tcp_ipv6_packet;
4814                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
4815                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
4816                         return;
4817                 }
4818                 *pkt = dummy_tcp_ipv6_packet;
4819                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
4820                 *offsets = dummy_tcp_ipv6_packet_offsets;
4821                 return;
4822         }
4823
4824         if (vlan) {
4825                 *pkt = dummy_vlan_tcp_packet;
4826                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
4827                 *offsets = dummy_vlan_tcp_packet_offsets;
4828         } else {
4829                 *pkt = dummy_tcp_packet;
4830                 *pkt_len = sizeof(dummy_tcp_packet);
4831                 *offsets = dummy_tcp_packet_offsets;
4832         }
4833 }
4834
4835 /**
4836  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
4837  *
4838  * @lkups: lookup elements or match criteria for the advanced recipe, one
4839  *         structure per protocol header
4840  * @lkups_cnt: number of protocols
4841  * @s_rule: stores rule information from the match criteria
4842  * @dummy_pkt: dummy packet to fill according to filter match criteria
4843  * @pkt_len: packet length of dummy packet
4844  * @offsets: offset info for the dummy packet
4845  */
4846 static int
4847 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4848                           struct ice_aqc_sw_rules_elem *s_rule,
4849                           const u8 *dummy_pkt, u16 pkt_len,
4850                           const struct ice_dummy_pkt_offsets *offsets)
4851 {
4852         u8 *pkt;
4853         u16 i;
4854
4855         /* Start with a packet with a pre-defined/dummy content. Then, fill
4856          * in the header values to be looked up or matched.
4857          */
4858         pkt = s_rule->pdata.lkup_tx_rx.hdr;
4859
4860         memcpy(pkt, dummy_pkt, pkt_len);
4861
4862         for (i = 0; i < lkups_cnt; i++) {
4863                 enum ice_protocol_type type;
4864                 u16 offset = 0, len = 0, j;
4865                 bool found = false;
4866
4867                 /* find the start of this layer; it should be found since this
4868                  * was already checked when searching for the dummy packet
4869                  */
4870                 type = lkups[i].type;
4871                 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
4872                         if (type == offsets[j].type) {
4873                                 offset = offsets[j].offset;
4874                                 found = true;
4875                                 break;
4876                         }
4877                 }
4878                 /* this should never happen in a correct calling sequence */
4879                 if (!found)
4880                         return -EINVAL;
4881
4882                 switch (lkups[i].type) {
4883                 case ICE_MAC_OFOS:
4884                 case ICE_MAC_IL:
4885                         len = sizeof(struct ice_ether_hdr);
4886                         break;
4887                 case ICE_ETYPE_OL:
4888                         len = sizeof(struct ice_ethtype_hdr);
4889                         break;
4890                 case ICE_VLAN_OFOS:
4891                         len = sizeof(struct ice_vlan_hdr);
4892                         break;
4893                 case ICE_IPV4_OFOS:
4894                 case ICE_IPV4_IL:
4895                         len = sizeof(struct ice_ipv4_hdr);
4896                         break;
4897                 case ICE_IPV6_OFOS:
4898                 case ICE_IPV6_IL:
4899                         len = sizeof(struct ice_ipv6_hdr);
4900                         break;
4901                 case ICE_TCP_IL:
4902                 case ICE_UDP_OF:
4903                 case ICE_UDP_ILOS:
4904                         len = sizeof(struct ice_l4_hdr);
4905                         break;
4906                 case ICE_SCTP_IL:
4907                         len = sizeof(struct ice_sctp_hdr);
4908                         break;
4909                 case ICE_NVGRE:
4910                         len = sizeof(struct ice_nvgre_hdr);
4911                         break;
4912                 case ICE_VXLAN:
4913                 case ICE_GENEVE:
4914                         len = sizeof(struct ice_udp_tnl_hdr);
4915                         break;
4916                 default:
4917                         return -EINVAL;
4918                 }
4919
4920                 /* the length should be a word multiple */
4921                 if (len % ICE_BYTES_PER_WORD)
4922                         return -EIO;
4923
4924                 /* We have the offset to the header start, the length, the
4925                  * caller's header values and mask. Use this information to
4926                  * copy the data into the dummy packet appropriately based on
4927                  * the mask. Note that we need to only write the bits as
4928                  * indicated by the mask to make sure we don't improperly write
4929                  * over any significant packet data.
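                 * For example, a field whose 16-bit mask word is 0xFFFF is
                 * overwritten entirely with the caller's header value, while
                 * a field whose mask word is zero is left exactly as it
                 * appears in the dummy packet.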
4930                  */
4931                 for (j = 0; j < len / sizeof(u16); j++)
4932                         if (((u16 *)&lkups[i].m_u)[j])
4933                                 ((u16 *)(pkt + offset))[j] =
4934                                         (((u16 *)(pkt + offset))[j] &
4935                                          ~((u16 *)&lkups[i].m_u)[j]) |
4936                                         (((u16 *)&lkups[i].h_u)[j] &
4937                                          ((u16 *)&lkups[i].m_u)[j]);
4938         }
4939
4940         s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
4941
4942         return 0;
4943 }
4944
4945 /**
4946  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
4947  * @hw: pointer to the hardware structure
4948  * @tun_type: tunnel type
4949  * @pkt: dummy packet to fill in
4950  * @offsets: offset info for the dummy packet
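 *
 * For example, if a VXLAN tunnel has been created, ice_get_open_tunnel_port()
 * reports its UDP port (e.g. the standard VXLAN port 4789) and the dst_port
 * of the outer UDP header (ICE_UDP_OF) in the dummy packet is rewritten to
 * that value.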
4951  */
4952 static int
4953 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
4954                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
4955 {
4956         u16 open_port, i;
4957
4958         switch (tun_type) {
4959         case ICE_SW_TUN_VXLAN:
4960                 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
4961                         return -EIO;
4962                 break;
4963         case ICE_SW_TUN_GENEVE:
4964                 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
4965                         return -EIO;
4966                 break;
4967         default:
4968                 /* Nothing needs to be done for this tunnel type */
4969                 return 0;
4970         }
4971
4972         /* Find the outer UDP protocol header and insert the port number */
4973         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
4974                 if (offsets[i].type == ICE_UDP_OF) {
4975                         struct ice_l4_hdr *hdr;
4976                         u16 offset;
4977
4978                         offset = offsets[i].offset;
4979                         hdr = (struct ice_l4_hdr *)&pkt[offset];
4980                         hdr->dst_port = cpu_to_be16(open_port);
4981
4982                         return 0;
4983                 }
4984         }
4985
4986         return -EIO;
4987 }
4988
4989 /**
4990  * ice_find_adv_rule_entry - Search a rule entry
4991  * @hw: pointer to the hardware structure
4992  * @lkups: lookup elements or match criteria for the advanced recipe, one
4993  *         structure per protocol header
4994  * @lkups_cnt: number of protocols
4995  * @recp_id: recipe ID for which we are finding the rule
4996  * @rinfo: other information regarding the rule e.g. priority and action info
4997  *
4998  * Helper function to search for a given advanced rule entry.
4999  * Returns a pointer to the entry storing the rule if found.
5000  */
5001 static struct ice_adv_fltr_mgmt_list_entry *
5002 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5003                         u16 lkups_cnt, u16 recp_id,
5004                         struct ice_adv_rule_info *rinfo)
5005 {
5006         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5007         struct ice_switch_info *sw = hw->switch_info;
5008         int i;
5009
5010         list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5011                             list_entry) {
5012                 bool lkups_matched = true;
5013
5014                 if (lkups_cnt != list_itr->lkups_cnt)
5015                         continue;
5016                 for (i = 0; i < list_itr->lkups_cnt; i++)
5017                         if (memcmp(&list_itr->lkups[i], &lkups[i],
5018                                    sizeof(*lkups))) {
5019                                 lkups_matched = false;
5020                                 break;
5021                         }
5022                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5023                     rinfo->tun_type == list_itr->rule_info.tun_type &&
5024                     lkups_matched)
5025                         return list_itr;
5026         }
5027         return NULL;
5028 }
5029
5030 /**
5031  * ice_adv_add_update_vsi_list
5032  * @hw: pointer to the hardware structure
5033  * @m_entry: pointer to current adv filter management list entry
5034  * @cur_fltr: filter information from the book keeping entry
5035  * @new_fltr: filter information with the new VSI to be added
5036  *
5037  * Call AQ command to add or update previously created VSI list with new VSI.
5038  *
5039  * Helper function to do the bookkeeping associated with adding filter information.
5040  * The algorithm used for the bookkeeping is described below:
5041  * When a VSI needs to subscribe to a given advanced filter
5042  *      if only one VSI has been added till now
5043  *              Allocate a new VSI list and add two VSIs
5044  *              to this list using switch rule command
5045  *              Update the previously created switch rule with the
5046  *              newly created VSI list ID
5047  *      if a VSI list was previously created
5048  *              Add the new VSI to the previously created VSI list set
5049  *              using the update switch rule command
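 *
 * For example (hypothetical VSI numbers): if VSI 3 already receives the flow
 * through a direct forward rule and VSI 5 subscribes to the same filter, a
 * two-entry VSI list {3, 5} is created and the rule is updated to
 * ICE_FWD_TO_VSI_LIST; a later subscription by VSI 7 only updates that
 * existing list.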
5050  */
5051 static int
5052 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5053                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
5054                             struct ice_adv_rule_info *cur_fltr,
5055                             struct ice_adv_rule_info *new_fltr)
5056 {
5057         u16 vsi_list_id = 0;
5058         int status;
5059
5060         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5061             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5062             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5063                 return -EOPNOTSUPP;
5064
5065         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5066              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5067             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5068              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5069                 return -EOPNOTSUPP;
5070
5071         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5072                  /* Only one entry existed in the mapping and it was not already
5073                   * a part of a VSI list. So, create a VSI list with the old and
5074                   * new VSIs.
5075                   */
5076                 struct ice_fltr_info tmp_fltr;
5077                 u16 vsi_handle_arr[2];
5078
5079                 /* A rule already exists with the new VSI being added */
5080                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5081                     new_fltr->sw_act.fwd_id.hw_vsi_id)
5082                         return -EEXIST;
5083
5084                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5085                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5086                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5087                                                   &vsi_list_id,
5088                                                   ICE_SW_LKUP_LAST);
5089                 if (status)
5090                         return status;
5091
5092                 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5093                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5094                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5095                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5096                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5097                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5098
5099                 /* Update the previous switch rule of "forward to VSI" to
5100                  * "fwd to VSI list"
5101                  */
5102                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5103                 if (status)
5104                         return status;
5105
5106                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5107                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5108                 m_entry->vsi_list_info =
5109                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5110                                                 vsi_list_id);
5111         } else {
5112                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5113
5114                 if (!m_entry->vsi_list_info)
5115                         return -EIO;
5116
5117                 /* A rule already exists with the new VSI being added */
5118                 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5119                         return 0;
5120
5121                 /* Update the previously created VSI list set with
5122                  * the new VSI ID passed in
5123                  */
5124                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5125
5126                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5127                                                   vsi_list_id, false,
5128                                                   ice_aqc_opc_update_sw_rules,
5129                                                   ICE_SW_LKUP_LAST);
5130                 /* update VSI list mapping info with new VSI ID */
5131                 if (!status)
5132                         set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5133         }
5134         if (!status)
5135                 m_entry->vsi_count++;
5136         return status;
5137 }
5138
5139 /**
5140  * ice_add_adv_rule - helper function to create an advanced switch rule
5141  * @hw: pointer to the hardware structure
5142  * @lkups: information on the words that need to be looked up. All words
5143  * together make one recipe
5144  * @lkups_cnt: num of entries in the lkups array
5145  * @rinfo: other information related to the rule that needs to be programmed
5146  * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
5147  *               ignored in case of error.
5148  *
5149  * This function can program only 1 rule at a time. The lkups is used to
5150  * describe all the words that form the "lookup" portion of the recipe.
5151  * These words can span multiple protocols. Callers to this function need to
5152  * pass in a list of protocol headers with lookup information along with a mask
5153  * that determines which words are valid from the given protocol header.
5154  * rinfo describes other information related to this rule such as forwarding
5155  * IDs, priority of this rule, etc.
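 *
 * A minimal usage sketch (hypothetical; 'mac', 'vsi_handle' and 'hw' are
 * assumed to be provided by the caller, and the lookup union member names
 * are from ice_protocol_type.h) for forwarding one destination MAC to a
 * VSI might look roughly like:
 *
 *      struct ice_adv_lkup_elem lkup = { .type = ICE_MAC_OFOS };
 *      struct ice_adv_rule_info rinfo = {};
 *      struct ice_rule_query_data added;
 *      int err;
 *
 *      ether_addr_copy(lkup.h_u.eth_hdr.dst_addr, mac);
 *      eth_broadcast_addr(lkup.m_u.eth_hdr.dst_addr);
 *      rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
 *      rinfo.sw_act.vsi_handle = vsi_handle;
 *      rinfo.rx = true;
 *      err = ice_add_adv_rule(hw, &lkup, 1, &rinfo, &added);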
5156  */
5157 int
5158 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5159                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5160                  struct ice_rule_query_data *added_entry)
5161 {
5162         struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5163         u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5164         const struct ice_dummy_pkt_offsets *pkt_offsets;
5165         struct ice_aqc_sw_rules_elem *s_rule = NULL;
5166         struct list_head *rule_head;
5167         struct ice_switch_info *sw;
5168         const u8 *pkt = NULL;
5169         u16 word_cnt;
5170         u32 act = 0;
5171         int status;
5172         u8 q_rgn;
5173
5174         /* Initialize profile to result index bitmap */
5175         if (!hw->switch_info->prof_res_bm_init) {
5176                 hw->switch_info->prof_res_bm_init = 1;
5177                 ice_init_prof_result_bm(hw);
5178         }
5179
5180         if (!lkups_cnt)
5181                 return -EINVAL;
5182
5183         /* get # of words we need to match */
5184         word_cnt = 0;
5185         for (i = 0; i < lkups_cnt; i++) {
5186                 u16 j, *ptr;
5187
5188                 ptr = (u16 *)&lkups[i].m_u;
5189                 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5190                         if (ptr[j] != 0)
5191                                 word_cnt++;
5192         }
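        /* e.g. (hypothetical): a single lookup that fully masks a 6-byte
         * destination MAC contributes three non-zero 16-bit words, so
         * word_cnt would be 3 for that rule.
         */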
5193
5194         if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
5195                 return -EINVAL;
5196
5197         /* make sure that we can locate a dummy packet */
5198         ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5199                               &pkt_offsets);
5200         if (!pkt) {
5201                 status = -EINVAL;
5202                 goto err_ice_add_adv_rule;
5203         }
5204
5205         if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5206               rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5207               rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5208               rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5209                 return -EIO;
5210
5211         vsi_handle = rinfo->sw_act.vsi_handle;
5212         if (!ice_is_vsi_valid(hw, vsi_handle))
5213                 return -EINVAL;
5214
5215         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5216                 rinfo->sw_act.fwd_id.hw_vsi_id =
5217                         ice_get_hw_vsi_num(hw, vsi_handle);
5218         if (rinfo->sw_act.flag & ICE_FLTR_TX)
5219                 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
5220
5221         status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5222         if (status)
5223                 return status;
5224         m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5225         if (m_entry) {
5226                 /* The rule already exists, so add the new VSI to its VSI
5227                  * list and increment vsi_count: if the rule currently
5228                  * forwards to a single VSI, create a VSI list containing
5229                  * both the existing VSI ID and the new VSI ID and update
5230                  * the rule to forward to that list; if a VSI list already
5231                  * exists, simply add the new VSI to it (nothing changes if
5232                  * it is already a member).
5233                  */
5234                 status = ice_adv_add_update_vsi_list(hw, m_entry,
5235                                                      &m_entry->rule_info,
5236                                                      rinfo);
5237                 if (added_entry) {
5238                         added_entry->rid = rid;
5239                         added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5240                         added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5241                 }
5242                 return status;
5243         }
5244         rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5245         s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
5246         if (!s_rule)
5247                 return -ENOMEM;
5248         if (!rinfo->flags_info.act_valid) {
5249                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
5250                 act |= ICE_SINGLE_ACT_LB_ENABLE;
5251         } else {
5252                 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
5253                                                 ICE_SINGLE_ACT_LB_ENABLE);
5254         }
5255
5256         switch (rinfo->sw_act.fltr_act) {
5257         case ICE_FWD_TO_VSI:
5258                 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5259                         ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5260                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5261                 break;
5262         case ICE_FWD_TO_Q:
5263                 act |= ICE_SINGLE_ACT_TO_Q;
5264                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5265                        ICE_SINGLE_ACT_Q_INDEX_M;
5266                 break;
5267         case ICE_FWD_TO_QGRP:
5268                 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5269                         (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
5270                 act |= ICE_SINGLE_ACT_TO_Q;
5271                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5272                        ICE_SINGLE_ACT_Q_INDEX_M;
5273                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5274                        ICE_SINGLE_ACT_Q_REGION_M;
5275                 break;
5276         case ICE_DROP_PACKET:
5277                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5278                        ICE_SINGLE_ACT_VALID_BIT;
5279                 break;
5280         default:
5281                 status = -EIO;
5282                 goto err_ice_add_adv_rule;
5283         }
5284
5285         /* set the rule LOOKUP type based on the caller-specified 'rx' flag
5286          * instead of hardcoding it to be either LOOKUP_TX/RX
5287          *
5288          * for 'Rx' set the source to be the port number
5289          * for 'Tx' set the source to be the source HW VSI number (determined
5290          * by caller)
5291          */
5292         if (rinfo->rx) {
5293                 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
5294                 s_rule->pdata.lkup_tx_rx.src =
5295                         cpu_to_le16(hw->port_info->lport);
5296         } else {
5297                 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
5298                 s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
5299         }
5300
5301         s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
5302         s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
5303
5304         status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
5305                                            pkt_len, pkt_offsets);
5306         if (status)
5307                 goto err_ice_add_adv_rule;
5308
5309         if (rinfo->tun_type != ICE_NON_TUN &&
5310             rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
5311                 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
5312                                                  s_rule->pdata.lkup_tx_rx.hdr,
5313                                                  pkt_offsets);
5314                 if (status)
5315                         goto err_ice_add_adv_rule;
5316         }
5317
5318         status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5319                                  rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5320                                  NULL);
5321         if (status)
5322                 goto err_ice_add_adv_rule;
5323         adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
5324                                 sizeof(struct ice_adv_fltr_mgmt_list_entry),
5325                                 GFP_KERNEL);
5326         if (!adv_fltr) {
5327                 status = -ENOMEM;
5328                 goto err_ice_add_adv_rule;
5329         }
5330
5331         adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
5332                                        lkups_cnt * sizeof(*lkups), GFP_KERNEL);
5333         if (!adv_fltr->lkups) {
5334                 status = -ENOMEM;
5335                 goto err_ice_add_adv_rule;
5336         }
5337
5338         adv_fltr->lkups_cnt = lkups_cnt;
5339         adv_fltr->rule_info = *rinfo;
5340         adv_fltr->rule_info.fltr_rule_id =
5341                 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
5342         sw = hw->switch_info;
5343         sw->recp_list[rid].adv_rule = true;
5344         rule_head = &sw->recp_list[rid].filt_rules;
5345
5346         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5347                 adv_fltr->vsi_count = 1;
5348
5349         /* Add rule entry to the bookkeeping list */
5350         list_add(&adv_fltr->list_entry, rule_head);
5351         if (added_entry) {
5352                 added_entry->rid = rid;
5353                 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5354                 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5355         }
5356 err_ice_add_adv_rule:
5357         if (status && adv_fltr) {
5358                 devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
5359                 devm_kfree(ice_hw_to_dev(hw), adv_fltr);
5360         }
5361
5362         kfree(s_rule);
5363
5364         return status;
5365 }
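
/* Illustrative sketch (not driver code): one way a caller might use
 * ice_add_adv_rule() to steer packets matching an outer MAC lookup to a
 * specific Rx queue. Variable names (hw, vsi_handle) and the exact
 * header/mask contents of the lookup element are assumptions for this
 * example only.
 *
 *	struct ice_adv_lkup_elem lkups[1] = {};
 *	struct ice_adv_rule_info rinfo = {};
 *	struct ice_rule_query_data added = {};
 *	int err;
 *
 *	lkups[0].type = ICE_MAC_OFOS;
 *	// fill the lookup header/mask with the destination MAC to match
 *
 *	rinfo.tun_type = ICE_NON_TUN;
 *	rinfo.rx = true;			// Rx lookup; source = port number
 *	rinfo.sw_act.fltr_act = ICE_FWD_TO_Q;
 *	rinfo.sw_act.fwd_id.q_id = 5;		// example queue index
 *	rinfo.sw_act.vsi_handle = vsi_handle;
 *
 *	err = ice_add_adv_rule(hw, lkups, ARRAY_SIZE(lkups), &rinfo, &added);
 *	// on success, 'added' carries rid/rule_id/vsi_handle for later removal
 */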
5366
5367 /**
5368  * ice_replay_vsi_fltr - Replay filters for requested VSI
5369  * @hw: pointer to the hardware structure
5370  * @vsi_handle: driver VSI handle
5371  * @recp_id: Recipe ID for which rules need to be replayed
5372  * @list_head: list for which filters need to be replayed
5373  *
5374  * Replays the filters of recipe recp_id for the VSI represented by
5375  * vsi_handle. A valid VSI handle must be passed.
5376  */
5377 static int
5378 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
5379                     struct list_head *list_head)
5380 {
5381         struct ice_fltr_mgmt_list_entry *itr;
5382         int status = 0;
5383         u16 hw_vsi_id;
5384
5385         if (list_empty(list_head))
5386                 return status;
5387         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5388
5389         list_for_each_entry(itr, list_head, list_entry) {
5390                 struct ice_fltr_list_entry f_entry;
5391
5392                 f_entry.fltr_info = itr->fltr_info;
5393                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
5394                     itr->fltr_info.vsi_handle == vsi_handle) {
5395                         /* update the src in case it is VSI num */
5396                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5397                                 f_entry.fltr_info.src = hw_vsi_id;
5398                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
5399                         if (status)
5400                                 goto end;
5401                         continue;
5402                 }
5403                 if (!itr->vsi_list_info ||
5404                     !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
5405                         continue;
5406                 /* Clearing it so that the logic can add it back */
5407                 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
5408                 f_entry.fltr_info.vsi_handle = vsi_handle;
5409                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
5410                 /* update the src in case it is VSI num */
5411                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5412                         f_entry.fltr_info.src = hw_vsi_id;
5413                 if (recp_id == ICE_SW_LKUP_VLAN)
5414                         status = ice_add_vlan_internal(hw, &f_entry);
5415                 else
5416                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
5417                 if (status)
5418                         goto end;
5419         }
5420 end:
5421         return status;
5422 }
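
/* Informational note on ice_replay_vsi_fltr() above: a rule owned by a single
 * VSI (vsi_count < 2, not a VLAN lookup, and whose fltr_info targets the VSI
 * being replayed) is re-added directly via ice_add_rule_internal(). A rule
 * tracked through a VSI list instead has the handle's bit cleared from the
 * saved vsi_map and is re-added as ICE_FWD_TO_VSI (via ice_add_vlan_internal()
 * for VLAN recipes) so that the add path rebuilds the VSI list membership.
 */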
5423
5424 /**
5425  * ice_adv_rem_update_vsi_list - remove a VSI from the VSI list of an advanced rule
5426  * @hw: pointer to the hardware structure
5427  * @vsi_handle: VSI handle of the VSI to remove
5428  * @fm_list: filter management entry for which the VSI list management needs to
5429  *           be done
5430  */
5431 static int
5432 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5433                             struct ice_adv_fltr_mgmt_list_entry *fm_list)
5434 {
5435         struct ice_vsi_list_map_info *vsi_list_info;
5436         enum ice_sw_lkup_type lkup_type;
5437         u16 vsi_list_id;
5438         int status;
5439
5440         if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5441             fm_list->vsi_count == 0)
5442                 return -EINVAL;
5443
5444         /* The VSI being removed is not part of this rule's VSI list */
5445         if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
5446                 return -ENOENT;
5447
5448         lkup_type = ICE_SW_LKUP_LAST;
5449         vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5450         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5451                                           ice_aqc_opc_update_sw_rules,
5452                                           lkup_type);
5453         if (status)
5454                 return status;
5455
5456         fm_list->vsi_count--;
5457         clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5458         vsi_list_info = fm_list->vsi_list_info;
5459         if (fm_list->vsi_count == 1) {
5460                 struct ice_fltr_info tmp_fltr;
5461                 u16 rem_vsi_handle;
5462
5463                 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
5464                                                 ICE_MAX_VSI);
5465                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5466                         return -EIO;
5467
5468                 /* Make sure VSI list is empty before removing it below */
5469                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5470                                                   vsi_list_id, true,
5471                                                   ice_aqc_opc_update_sw_rules,
5472                                                   lkup_type);
5473                 if (status)
5474                         return status;
5475
5476                 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5477                 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
5478                 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5479                 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5480                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5481                 tmp_fltr.fwd_id.hw_vsi_id =
5482                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
5483                 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5484                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
5485                 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
5486
5487                 /* Update the previous switch rule of "forward to VSI list"
5488                  * to "forward to VSI" (the last remaining VSI in the list)
5489                  */
5490                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5491                 if (status) {
5492                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5493                                   tmp_fltr.fwd_id.hw_vsi_id, status);
5494                         return status;
5495                 }
5496                 fm_list->vsi_list_info->ref_cnt--;
5497
5498                 /* Remove the VSI list since it is no longer used */
5499                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5500                 if (status) {
5501                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
5502                                   vsi_list_id, status);
5503                         return status;
5504                 }
5505
5506                 list_del(&vsi_list_info->list_entry);
5507                 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
5508                 fm_list->vsi_list_info = NULL;
5509         }
5510
5511         return status;
5512 }
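
/* Worked example (informational) for ice_adv_rem_update_vsi_list() above,
 * assuming a rule that currently forwards to the VSI list {VSI 3, VSI 5}:
 * removing VSI 3 drops vsi_count to 1, so the rule is rewritten from
 * ICE_FWD_TO_VSI_LIST to ICE_FWD_TO_VSI targeting VSI 5's HW VSI number,
 * the now-unused VSI list rule is removed, and fm_list->vsi_list_info is
 * freed and cleared.
 */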
5513
5514 /**
5515  * ice_rem_adv_rule - removes existing advanced switch rule
5516  * @hw: pointer to the hardware structure
5517  * @lkups: information on the words that need to be looked up. All words
5518  *         together make one recipe
5519  * @lkups_cnt: number of entries in the lkups array
5520  * @rinfo: pointer to the rule information for the rule
5521  *
5522  * This function removes one rule at a time. The lkups array describes all
5523  * the words that form the "lookup" portion of the rule; these words can
5524  * span multiple protocols. Callers need to pass in a list of protocol
5525  * headers with lookup information along with a mask that determines which
5526  * words are valid in the given protocol header. rinfo describes other
5527  * information related to this rule such as forwarding IDs, priority of
5528  * this rule, etc.
5529  */
5530 static int
5531 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5532                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
5533 {
5534         struct ice_adv_fltr_mgmt_list_entry *list_elem;
5535         struct ice_prot_lkup_ext lkup_exts;
5536         bool remove_rule = false;
5537         struct mutex *rule_lock; /* Lock to protect filter rule list */
5538         u16 i, rid, vsi_handle;
5539         int status = 0;
5540
5541         memset(&lkup_exts, 0, sizeof(lkup_exts));
5542         for (i = 0; i < lkups_cnt; i++) {
5543                 u16 count;
5544
5545                 if (lkups[i].type >= ICE_PROTOCOL_LAST)
5546                         return -EIO;
5547
5548                 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5549                 if (!count)
5550                         return -EIO;
5551         }
5552
5553         /* Create any special protocol/offset pairs, such as looking at tunnel
5554          * bits by extracting metadata
5555          */
5556         status = ice_add_special_words(rinfo, &lkup_exts);
5557         if (status)
5558                 return status;
5559
5560         rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
5561         /* if we did not find a recipe that matches the existing criteria */
5562         if (rid == ICE_MAX_NUM_RECIPES)
5563                 return -EINVAL;
5564
5565         rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5566         list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5567         /* the rule is already removed */
5568         if (!list_elem)
5569                 return 0;
5570         mutex_lock(rule_lock);
5571         if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5572                 remove_rule = true;
5573         } else if (list_elem->vsi_count > 1) {
5574                 remove_rule = false;
5575                 vsi_handle = rinfo->sw_act.vsi_handle;
5576                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5577         } else {
5578                 vsi_handle = rinfo->sw_act.vsi_handle;
5579                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5580                 if (status) {
5581                         mutex_unlock(rule_lock);
5582                         return status;
5583                 }
5584                 if (list_elem->vsi_count == 0)
5585                         remove_rule = true;
5586         }
5587         mutex_unlock(rule_lock);
5588         if (remove_rule) {
5589                 struct ice_aqc_sw_rules_elem *s_rule;
5590                 u16 rule_buf_sz;
5591
5592                 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5593                 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
5594                 if (!s_rule)
5595                         return -ENOMEM;
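                /* a remove request identifies the rule solely by its index;
                 * act and hdr_len are not used and stay zero
                 */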
5596                 s_rule->pdata.lkup_tx_rx.act = 0;
5597                 s_rule->pdata.lkup_tx_rx.index =
5598                         cpu_to_le16(list_elem->rule_info.fltr_rule_id);
5599                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5600                 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5601                                          rule_buf_sz, 1,
5602                                          ice_aqc_opc_remove_sw_rules, NULL);
5603                 if (!status || status == -ENOENT) {
5604                         struct ice_switch_info *sw = hw->switch_info;
5605
5606                         mutex_lock(rule_lock);
5607                         list_del(&list_elem->list_entry);
5608                         devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
5609                         devm_kfree(ice_hw_to_dev(hw), list_elem);
5610                         mutex_unlock(rule_lock);
5611                         if (list_empty(&sw->recp_list[rid].filt_rules))
5612                                 sw->recp_list[rid].adv_rule = false;
5613                 }
5614                 kfree(s_rule);
5615         }
5616         return status;
5617 }
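
/* Caller contract note for ice_rem_adv_rule() above: the lkups/lkups_cnt and
 * rinfo passed at removal time must describe the same lookup words and tunnel
 * type used when the rule was added, since ice_find_recp() and
 * ice_find_adv_rule_entry() rely on them to locate the recipe and the rule
 * entry. ice_rem_adv_rule_by_id() below satisfies this by reusing the saved
 * struct ice_adv_fltr_mgmt_list_entry contents.
 */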
5618
5619 /**
5620  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5621  * @hw: pointer to the hardware structure
5622  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5623  *
5624  * This function removes one rule at a time, based on the remove_entry
5625  * parameter: the rule matching the rule_id in remove_entry is removed for
5626  * the vsi_handle carried in remove_entry.
5627  */
5628 int
5629 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5630                        struct ice_rule_query_data *remove_entry)
5631 {
5632         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5633         struct list_head *list_head;
5634         struct ice_adv_rule_info rinfo;
5635         struct ice_switch_info *sw;
5636
5637         sw = hw->switch_info;
5638         if (!sw->recp_list[remove_entry->rid].recp_created)
5639                 return -EINVAL;
5640         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5641         list_for_each_entry(list_itr, list_head, list_entry) {
5642                 if (list_itr->rule_info.fltr_rule_id ==
5643                     remove_entry->rule_id) {
5644                         rinfo = list_itr->rule_info;
5645                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5646                         return ice_rem_adv_rule(hw, list_itr->lkups,
5647                                                 list_itr->lkups_cnt, &rinfo);
5648                 }
5649         }
5650         /* either the list is empty or the rule was not found */
5651         return -ENOENT;
5652 }
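
/* Illustrative sketch (not driver code): removing a rule using the query data
 * returned by ice_add_adv_rule(); 'added' is assumed to be the
 * struct ice_rule_query_data filled in by a successful add.
 *
 *	int err;
 *
 *	err = ice_rem_adv_rule_by_id(hw, &added);
 *	if (err == -ENOENT)
 *		; // the rule was already removed or the list is empty
 */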
5653
5654 /**
5655  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
5656  *                            given VSI handle
5657  * @hw: pointer to the hardware structure
5658  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
5659  *
5660  * Removes all the advanced rules that reference the given VSI. If removing
5661  * any rule fails, the function returns immediately with that error code;
5662  * otherwise it returns success.
5663  */
5664 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
5665 {
5666         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
5667         struct ice_vsi_list_map_info *map_info;
5668         struct ice_adv_rule_info rinfo;
5669         struct list_head *list_head;
5670         struct ice_switch_info *sw;
5671         int status;
5672         u8 rid;
5673
5674         sw = hw->switch_info;
5675         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
5676                 if (!sw->recp_list[rid].recp_created)
5677                         continue;
5678                 if (!sw->recp_list[rid].adv_rule)
5679                         continue;
5680
5681                 list_head = &sw->recp_list[rid].filt_rules;
5682                 list_for_each_entry_safe(list_itr, tmp_entry, list_head,
5683                                          list_entry) {
5684                         rinfo = list_itr->rule_info;
5685
5686                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
5687                                 map_info = list_itr->vsi_list_info;
5688                                 if (!map_info)
5689                                         continue;
5690
5691                                 if (!test_bit(vsi_handle, map_info->vsi_map))
5692                                         continue;
5693                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
5694                                 continue;
5695                         }
5696
5697                         rinfo.sw_act.vsi_handle = vsi_handle;
5698                         status = ice_rem_adv_rule(hw, list_itr->lkups,
5699                                                   list_itr->lkups_cnt, &rinfo);
5700                         if (status)
5701                                 return status;
5702                 }
5703         }
5704         return 0;
5705 }
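
/* Illustrative sketch (not driver code): tearing down every advanced rule that
 * references a VSI, e.g. before the VSI itself is released. 'hw' and
 * 'vsi_handle' are assumed caller variables.
 *
 *	int err = ice_rem_adv_rule_for_vsi(hw, vsi_handle);
 *	if (err)
 *		; // a removal failed; rules referencing this VSI may remain
 */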
5706
5707 /**
5708  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
5709  * @hw: pointer to the hardware structure
5710  * @vsi_handle: driver VSI handle
5711  * @list_head: list for which filters need to be replayed
5712  *
5713  * Replay the advanced rule for the given VSI.
5714  */
5715 static int
5716 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
5717                         struct list_head *list_head)
5718 {
5719         struct ice_rule_query_data added_entry = { 0 };
5720         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
5721         int status = 0;
5722
5723         if (list_empty(list_head))
5724                 return status;
5725         list_for_each_entry(adv_fltr, list_head, list_entry) {
5726                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
5727                 u16 lk_cnt = adv_fltr->lkups_cnt;
5728
5729                 if (vsi_handle != rinfo->sw_act.vsi_handle)
5730                         continue;
5731                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
5732                                           &added_entry);
5733                 if (status)
5734                         break;
5735         }
5736         return status;
5737 }
5738
5739 /**
5740  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
5741  * @hw: pointer to the hardware structure
5742  * @vsi_handle: driver VSI handle
5743  *
5744  * Replays all filters for the VSI represented by vsi_handle.
5745  */
5746 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
5747 {
5748         struct ice_switch_info *sw = hw->switch_info;
5749         int status;
5750         u8 i;
5751
5752         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5753                 struct list_head *head;
5754
5755                 head = &sw->recp_list[i].filt_replay_rules;
5756                 if (!sw->recp_list[i].adv_rule)
5757                         status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
5758                 else
5759                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
5760                 if (status)
5761                         return status;
5762         }
5763         return status;
5764 }
5765
5766 /**
5767  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
5768  * @hw: pointer to the HW struct
5769  *
5770  * Deletes the filter replay rules.
5771  */
5772 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
5773 {
5774         struct ice_switch_info *sw = hw->switch_info;
5775         u8 i;
5776
5777         if (!sw)
5778                 return;
5779
5780         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5781                 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
5782                         struct list_head *l_head;
5783
5784                         l_head = &sw->recp_list[i].filt_replay_rules;
5785                         if (!sw->recp_list[i].adv_rule)
5786                                 ice_rem_sw_rule_info(hw, l_head);
5787                         else
5788                                 ice_rem_adv_rule_info(hw, l_head);
5789                 }
5790         }
5791 }
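
/* Illustrative sketch (not driver code): a possible post-reset flow, assuming
 * the caller walks its VSI handles once the hardware has been rebuilt.
 *
 *	for (i = 0; i < ICE_MAX_VSI; i++) {
 *		if (!ice_is_vsi_valid(hw, i))
 *			continue;
 *		err = ice_replay_vsi_all_fltr(hw, i);
 *		if (err)
 *			break;
 *	}
 *
 *	// once replay is complete, release the saved replay bookkeeping
 *	ice_rm_all_sw_replay_rule_info(hw);
 */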