1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
5 #include "ice_switch.h"
/* Byte offsets into the dummy Ethernet header defined further below:
 * destination MAC at 0, EtherType at 12, VLAN TCI at 14 (see the
 * "Dummy ethernet header" comment for the layout).
 */
7 #define ICE_ETH_DA_OFFSET 0
8 #define ICE_ETH_ETHTYPE_OFFSET 12
9 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN IDs are 12 bits wide, so 0xFFF is the largest valid value */
10 #define ICE_MAX_VLAN_ID 0xFFF
/* EtherType value identifying an IPv6 payload */
11 #define ICE_IPV6_ETHER_ID 0x86DD
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14 * struct to configure any switch filter rules.
15 * {DA (6 bytes), SA(6 bytes),
16 * Ether type (2 bytes for header without VLAN tag) OR
17 * VLAN tag (4 bytes for header with VLAN tag) }
19 * Word on Hardcoded values
20 * byte 0 = 0x2: to identify it as locally administered DA MAC
21 * byte 6 = 0x2: to identify it as locally administered SA MAC
22 * byte 12 = 0x81 & byte 13 = 0x00:
23 * In case of a VLAN filter the first two bytes define the ether type (0x8100)
24 * and the remaining two bytes are a placeholder for programming a given VLAN ID
25 * In case of an Ether type filter it is treated as a header without VLAN tag
26 * and bytes 12 and 13 are used to program a given Ether type instead
/* Dummy Ethernet header length: DA (6) + SA (6) + EtherType or VLAN tag (4),
 * per the layout described in the comment above.
 */
28 #define DUMMY_ETH_HDR_LEN 16
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34 ICE_PKT_OUTER_IPV6 = BIT(0),
35 ICE_PKT_TUN_GTPC = BIT(1),
36 ICE_PKT_TUN_GTPU = BIT(2),
37 ICE_PKT_TUN_NVGRE = BIT(3),
38 ICE_PKT_TUN_UDP = BIT(4),
39 ICE_PKT_INNER_IPV6 = BIT(5),
40 ICE_PKT_INNER_TCP = BIT(6),
41 ICE_PKT_INNER_UDP = BIT(7),
42 ICE_PKT_GTP_NOPAY = BIT(8),
43 ICE_PKT_KMALLOC = BIT(9),
44 ICE_PKT_PPPOE = BIT(10),
45 ICE_PKT_L2TPV3 = BIT(11),
48 struct ice_dummy_pkt_offsets {
49 enum ice_protocol_type type;
50 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
53 struct ice_dummy_pkt_profile {
54 const struct ice_dummy_pkt_offsets *offsets;
/* Declare the protocol-offset table that accompanies a dummy packet of the
 * given type; each entry maps an ice_protocol_type to its byte offset in the
 * matching template, terminated by ICE_PROTOCOL_LAST.
 */
61 #define ICE_DECLARE_PKT_OFFSETS(type) \
62 static const struct ice_dummy_pkt_offsets \
63 ice_dummy_##type##_packet_offsets[]
/* Declare the raw byte template of a dummy packet of the given type */
65 #define ICE_DECLARE_PKT_TEMPLATE(type) \
66 static const u8 ice_dummy_##type##_packet[]
68 #define ICE_PKT_PROFILE(type, m) { \
70 .pkt = ice_dummy_##type##_packet, \
71 .pkt_len = sizeof(ice_dummy_##type##_packet), \
72 .offsets = ice_dummy_##type##_packet_offsets, \
73 .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
76 ICE_DECLARE_PKT_OFFSETS(vlan) = {
77 { ICE_VLAN_OFOS, 12 },
80 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
81 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
84 ICE_DECLARE_PKT_OFFSETS(qinq) = {
89 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
90 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
91 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
94 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
97 { ICE_IPV4_OFOS, 14 },
100 { ICE_ETYPE_IL, 54 },
103 { ICE_PROTOCOL_LAST, 0 },
106 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
107 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x08, 0x00, /* ICE_ETYPE_OL 12 */
113 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x2F, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
119 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
120 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_IL 54 */
128 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x06, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
135 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00,
137 0x50, 0x02, 0x20, 0x00,
138 0x00, 0x00, 0x00, 0x00
141 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
143 { ICE_ETYPE_OL, 12 },
144 { ICE_IPV4_OFOS, 14 },
147 { ICE_ETYPE_IL, 54 },
149 { ICE_UDP_ILOS, 76 },
150 { ICE_PROTOCOL_LAST, 0 },
153 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
154 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
155 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00,
158 0x08, 0x00, /* ICE_ETYPE_OL 12 */
160 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
161 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x2F, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
167 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x08, 0x00, /* ICE_ETYPE_IL 54 */
175 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x11, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x00, 0x00,
181 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
182 0x00, 0x08, 0x00, 0x00,
185 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
187 { ICE_ETYPE_OL, 12 },
188 { ICE_IPV4_OFOS, 14 },
192 { ICE_VXLAN_GPE, 42 },
194 { ICE_ETYPE_IL, 62 },
197 { ICE_PROTOCOL_LAST, 0 },
200 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
201 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
202 0x00, 0x00, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
205 0x08, 0x00, /* ICE_ETYPE_OL 12 */
207 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
208 0x00, 0x01, 0x00, 0x00,
209 0x40, 0x11, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00,
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
214 0x00, 0x46, 0x00, 0x00,
216 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
217 0x00, 0x00, 0x00, 0x00,
219 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x08, 0x00, /* ICE_ETYPE_IL 62 */
225 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
226 0x00, 0x01, 0x00, 0x00,
227 0x40, 0x06, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
234 0x50, 0x02, 0x20, 0x00,
235 0x00, 0x00, 0x00, 0x00
238 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
240 { ICE_ETYPE_OL, 12 },
241 { ICE_IPV4_OFOS, 14 },
245 { ICE_VXLAN_GPE, 42 },
247 { ICE_ETYPE_IL, 62 },
249 { ICE_UDP_ILOS, 84 },
250 { ICE_PROTOCOL_LAST, 0 },
253 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
254 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
255 0x00, 0x00, 0x00, 0x00,
256 0x00, 0x00, 0x00, 0x00,
258 0x08, 0x00, /* ICE_ETYPE_OL 12 */
260 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
261 0x00, 0x01, 0x00, 0x00,
262 0x00, 0x11, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
267 0x00, 0x3a, 0x00, 0x00,
269 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
270 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x08, 0x00, /* ICE_ETYPE_IL 62 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
285 0x00, 0x08, 0x00, 0x00,
288 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
290 { ICE_ETYPE_OL, 12 },
291 { ICE_IPV4_OFOS, 14 },
294 { ICE_ETYPE_IL, 54 },
297 { ICE_PROTOCOL_LAST, 0 },
300 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x2F, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
314 0x00, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
317 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, 0x00, 0x00,
320 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
322 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
323 0x00, 0x08, 0x06, 0x40,
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x50, 0x02, 0x20, 0x00,
337 0x00, 0x00, 0x00, 0x00
340 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_IPV4_OFOS, 14 },
346 { ICE_ETYPE_IL, 54 },
348 { ICE_UDP_ILOS, 96 },
349 { ICE_PROTOCOL_LAST, 0 },
352 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
353 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
354 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00,
357 0x08, 0x00, /* ICE_ETYPE_OL 12 */
359 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
360 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x2F, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
366 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
372 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
374 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
375 0x00, 0x08, 0x11, 0x40,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
386 0x00, 0x08, 0x00, 0x00,
389 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
391 { ICE_ETYPE_OL, 12 },
392 { ICE_IPV4_OFOS, 14 },
396 { ICE_VXLAN_GPE, 42 },
398 { ICE_ETYPE_IL, 62 },
401 { ICE_PROTOCOL_LAST, 0 },
404 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x08, 0x00, /* ICE_ETYPE_OL 12 */
411 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
412 0x00, 0x01, 0x00, 0x00,
413 0x40, 0x11, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
418 0x00, 0x5a, 0x00, 0x00,
420 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
427 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
429 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
430 0x00, 0x08, 0x06, 0x40,
431 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x50, 0x02, 0x20, 0x00,
444 0x00, 0x00, 0x00, 0x00
447 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
449 { ICE_ETYPE_OL, 12 },
450 { ICE_IPV4_OFOS, 14 },
454 { ICE_VXLAN_GPE, 42 },
456 { ICE_ETYPE_IL, 62 },
458 { ICE_UDP_ILOS, 104 },
459 { ICE_PROTOCOL_LAST, 0 },
462 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
463 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
464 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, 0x00, 0x00,
467 0x08, 0x00, /* ICE_ETYPE_OL 12 */
469 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
470 0x00, 0x01, 0x00, 0x00,
471 0x00, 0x11, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
476 0x00, 0x4e, 0x00, 0x00,
478 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
479 0x00, 0x00, 0x00, 0x00,
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
487 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
488 0x00, 0x08, 0x11, 0x40,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
499 0x00, 0x08, 0x00, 0x00,
502 /* offset info for MAC + IPv4 + UDP dummy packet */
503 ICE_DECLARE_PKT_OFFSETS(udp) = {
505 { ICE_ETYPE_OL, 12 },
506 { ICE_IPV4_OFOS, 14 },
507 { ICE_UDP_ILOS, 34 },
508 { ICE_PROTOCOL_LAST, 0 },
511 /* Dummy packet for MAC + IPv4 + UDP */
512 ICE_DECLARE_PKT_TEMPLATE(udp) = {
513 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
517 0x08, 0x00, /* ICE_ETYPE_OL 12 */
519 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
520 0x00, 0x01, 0x00, 0x00,
521 0x00, 0x11, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
526 0x00, 0x08, 0x00, 0x00,
528 0x00, 0x00, /* 2 bytes for 4 byte alignment */
531 /* offset info for MAC + IPv4 + TCP dummy packet */
532 ICE_DECLARE_PKT_OFFSETS(tcp) = {
534 { ICE_ETYPE_OL, 12 },
535 { ICE_IPV4_OFOS, 14 },
537 { ICE_PROTOCOL_LAST, 0 },
540 /* Dummy packet for MAC + IPv4 + TCP */
541 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
542 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
543 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
546 0x08, 0x00, /* ICE_ETYPE_OL 12 */
548 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
549 0x00, 0x01, 0x00, 0x00,
550 0x00, 0x06, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x00, 0x00,
554 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
555 0x00, 0x00, 0x00, 0x00,
556 0x00, 0x00, 0x00, 0x00,
557 0x50, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, /* 2 bytes for 4 byte alignment */
563 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
565 { ICE_ETYPE_OL, 12 },
566 { ICE_IPV6_OFOS, 14 },
568 { ICE_PROTOCOL_LAST, 0 },
571 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
572 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
576 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
578 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
579 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
589 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
592 0x50, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, /* 2 bytes for 4 byte alignment */
599 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
601 { ICE_ETYPE_OL, 12 },
602 { ICE_IPV6_OFOS, 14 },
603 { ICE_UDP_ILOS, 54 },
604 { ICE_PROTOCOL_LAST, 0 },
607 /* IPv6 + UDP dummy packet */
608 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
609 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
610 0x00, 0x00, 0x00, 0x00,
611 0x00, 0x00, 0x00, 0x00,
613 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
615 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
616 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x00, 0x00,
626 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
627 0x00, 0x10, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
630 0x00, 0x00, 0x00, 0x00,
632 0x00, 0x00, /* 2 bytes for 4 byte alignment */
635 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
636 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
638 { ICE_IPV4_OFOS, 14 },
643 { ICE_PROTOCOL_LAST, 0 },
646 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
647 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
652 0x45, 0x00, 0x00, 0x58, /* IP 14 */
653 0x00, 0x00, 0x00, 0x00,
654 0x00, 0x11, 0x00, 0x00,
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00,
658 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
659 0x00, 0x44, 0x00, 0x00,
661 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x85,
665 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
666 0x00, 0x00, 0x00, 0x00,
668 0x45, 0x00, 0x00, 0x28, /* IP 62 */
669 0x00, 0x00, 0x00, 0x00,
670 0x00, 0x06, 0x00, 0x00,
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
675 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x00, 0x00,
677 0x50, 0x00, 0x00, 0x00,
678 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, /* 2 bytes for 4 byte alignment */
683 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
684 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
686 { ICE_IPV4_OFOS, 14 },
690 { ICE_UDP_ILOS, 82 },
691 { ICE_PROTOCOL_LAST, 0 },
694 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
695 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
696 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00,
700 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x11, 0x00, 0x00,
703 0x00, 0x00, 0x00, 0x00,
704 0x00, 0x00, 0x00, 0x00,
706 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
707 0x00, 0x38, 0x00, 0x00,
709 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x85,
713 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
714 0x00, 0x00, 0x00, 0x00,
716 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
717 0x00, 0x00, 0x00, 0x00,
718 0x00, 0x11, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
723 0x00, 0x08, 0x00, 0x00,
725 0x00, 0x00, /* 2 bytes for 4 byte alignment */
728 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
729 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
731 { ICE_IPV4_OFOS, 14 },
736 { ICE_PROTOCOL_LAST, 0 },
739 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
740 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x00, 0x00, 0x00,
745 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x11, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
752 0x00, 0x58, 0x00, 0x00,
754 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
755 0x00, 0x00, 0x00, 0x00,
756 0x00, 0x00, 0x00, 0x85,
758 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
759 0x00, 0x00, 0x00, 0x00,
761 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
762 0x00, 0x14, 0x06, 0x00,
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x50, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, /* 2 bytes for 4 byte alignment */
781 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
783 { ICE_IPV4_OFOS, 14 },
787 { ICE_UDP_ILOS, 102 },
788 { ICE_PROTOCOL_LAST, 0 },
791 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
792 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
797 0x45, 0x00, 0x00, 0x60, /* IP 14 */
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x11, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
801 0x00, 0x00, 0x00, 0x00,
803 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
804 0x00, 0x4c, 0x00, 0x00,
806 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x85,
810 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
811 0x00, 0x00, 0x00, 0x00,
813 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
814 0x00, 0x08, 0x11, 0x00,
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
825 0x00, 0x08, 0x00, 0x00,
827 0x00, 0x00, /* 2 bytes for 4 byte alignment */
830 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
832 { ICE_IPV6_OFOS, 14 },
837 { ICE_PROTOCOL_LAST, 0 },
840 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
841 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
842 0x00, 0x00, 0x00, 0x00,
843 0x00, 0x00, 0x00, 0x00,
846 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
847 0x00, 0x44, 0x11, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
858 0x00, 0x44, 0x00, 0x00,
860 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
861 0x00, 0x00, 0x00, 0x00,
862 0x00, 0x00, 0x00, 0x85,
864 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
865 0x00, 0x00, 0x00, 0x00,
867 0x45, 0x00, 0x00, 0x28, /* IP 82 */
868 0x00, 0x00, 0x00, 0x00,
869 0x00, 0x06, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
871 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x50, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, /* 2 bytes for 4 byte alignment */
882 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
884 { ICE_IPV6_OFOS, 14 },
888 { ICE_UDP_ILOS, 102 },
889 { ICE_PROTOCOL_LAST, 0 },
892 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
893 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 0x00, 0x00, 0x00, 0x00,
895 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 0x00, 0x38, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 0x00, 0x38, 0x00, 0x00,
912 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
913 0x00, 0x00, 0x00, 0x00,
914 0x00, 0x00, 0x00, 0x85,
916 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 0x00, 0x00, 0x00, 0x00,
919 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x11, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
926 0x00, 0x08, 0x00, 0x00,
928 0x00, 0x00, /* 2 bytes for 4 byte alignment */
931 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
933 { ICE_IPV6_OFOS, 14 },
938 { ICE_PROTOCOL_LAST, 0 },
941 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
942 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, 0x00, 0x00,
947 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
948 0x00, 0x58, 0x11, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
953 0x00, 0x00, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
956 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
959 0x00, 0x58, 0x00, 0x00,
961 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
962 0x00, 0x00, 0x00, 0x00,
963 0x00, 0x00, 0x00, 0x85,
965 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
966 0x00, 0x00, 0x00, 0x00,
968 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
969 0x00, 0x14, 0x06, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
975 0x00, 0x00, 0x00, 0x00,
976 0x00, 0x00, 0x00, 0x00,
977 0x00, 0x00, 0x00, 0x00,
979 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x00,
982 0x50, 0x00, 0x00, 0x00,
983 0x00, 0x00, 0x00, 0x00,
985 0x00, 0x00, /* 2 bytes for 4 byte alignment */
988 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
990 { ICE_IPV6_OFOS, 14 },
994 { ICE_UDP_ILOS, 122 },
995 { ICE_PROTOCOL_LAST, 0 },
998 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
999 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, 0x00, 0x00,
1004 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1005 0x00, 0x4c, 0x11, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1007 0x00, 0x00, 0x00, 0x00,
1008 0x00, 0x00, 0x00, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1015 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
1016 0x00, 0x4c, 0x00, 0x00,
1018 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x85,
1022 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1023 0x00, 0x00, 0x00, 0x00,
1025 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1026 0x00, 0x08, 0x11, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00,
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, 0x00, 0x00,
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00,
1036 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1037 0x00, 0x08, 0x00, 0x00,
1039 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1042 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1043 { ICE_MAC_OFOS, 0 },
1044 { ICE_IPV4_OFOS, 14 },
1046 { ICE_GTP_NO_PAY, 42 },
1047 { ICE_PROTOCOL_LAST, 0 },
1050 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1051 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00,
1056 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1057 0x00, 0x00, 0x40, 0x00,
1058 0x40, 0x11, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1060 0x00, 0x00, 0x00, 0x00,
1062 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1063 0x00, 0x00, 0x00, 0x00,
1065 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1066 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x85,
1069 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1070 0x00, 0x00, 0x00, 0x00,
1072 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1073 0x00, 0x00, 0x40, 0x00,
1074 0x40, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1080 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1081 { ICE_MAC_OFOS, 0 },
1082 { ICE_IPV6_OFOS, 14 },
1084 { ICE_GTP_NO_PAY, 62 },
1085 { ICE_PROTOCOL_LAST, 0 },
1088 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1089 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1094 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1095 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1103 0x00, 0x00, 0x00, 0x00,
1105 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1106 0x00, 0x00, 0x00, 0x00,
1108 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1109 0x00, 0x00, 0x00, 0x00,
1114 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
1115 { ICE_MAC_OFOS, 0 },
1116 { ICE_ETYPE_OL, 12 },
1118 { ICE_IPV4_OFOS, 22 },
1120 { ICE_PROTOCOL_LAST, 0 },
1123 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
1124 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1125 0x00, 0x00, 0x00, 0x00,
1126 0x00, 0x00, 0x00, 0x00,
1128 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1130 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1133 0x00, 0x21, /* PPP Link Layer 20 */
1135 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1136 0x00, 0x01, 0x00, 0x00,
1137 0x00, 0x06, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1142 0x00, 0x00, 0x00, 0x00,
1143 0x00, 0x00, 0x00, 0x00,
1144 0x50, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1147 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1150 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_ETYPE_OL, 12 },
1154 { ICE_IPV4_OFOS, 22 },
1155 { ICE_UDP_ILOS, 42 },
1156 { ICE_PROTOCOL_LAST, 0 },
1159 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
1160 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1161 0x00, 0x00, 0x00, 0x00,
1162 0x00, 0x00, 0x00, 0x00,
1164 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1166 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1169 0x00, 0x21, /* PPP Link Layer 20 */
1171 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1172 0x00, 0x01, 0x00, 0x00,
1173 0x00, 0x11, 0x00, 0x00,
1174 0x00, 0x00, 0x00, 0x00,
1175 0x00, 0x00, 0x00, 0x00,
1177 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1178 0x00, 0x08, 0x00, 0x00,
1180 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1183 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
1184 { ICE_MAC_OFOS, 0 },
1185 { ICE_ETYPE_OL, 12 },
1187 { ICE_IPV6_OFOS, 22 },
1189 { ICE_PROTOCOL_LAST, 0 },
1192 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
1193 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1197 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1199 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1202 0x00, 0x57, /* PPP Link Layer 20 */
1204 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1205 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1206 0x00, 0x00, 0x00, 0x00,
1207 0x00, 0x00, 0x00, 0x00,
1208 0x00, 0x00, 0x00, 0x00,
1209 0x00, 0x00, 0x00, 0x00,
1210 0x00, 0x00, 0x00, 0x00,
1211 0x00, 0x00, 0x00, 0x00,
1212 0x00, 0x00, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1215 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
1216 0x00, 0x00, 0x00, 0x00,
1217 0x00, 0x00, 0x00, 0x00,
1218 0x50, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x00, 0x00,
1221 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1224 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
1225 { ICE_MAC_OFOS, 0 },
1226 { ICE_ETYPE_OL, 12 },
1228 { ICE_IPV6_OFOS, 22 },
1229 { ICE_UDP_ILOS, 62 },
1230 { ICE_PROTOCOL_LAST, 0 },
1233 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
1234 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1235 0x00, 0x00, 0x00, 0x00,
1236 0x00, 0x00, 0x00, 0x00,
1238 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1240 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1243 0x00, 0x57, /* PPP Link Layer 20 */
1245 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1246 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1247 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x00, 0x00,
1249 0x00, 0x00, 0x00, 0x00,
1250 0x00, 0x00, 0x00, 0x00,
1251 0x00, 0x00, 0x00, 0x00,
1252 0x00, 0x00, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1257 0x00, 0x08, 0x00, 0x00,
1259 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1262 ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
1263 { ICE_MAC_OFOS, 0 },
1264 { ICE_ETYPE_OL, 12 },
1265 { ICE_IPV4_OFOS, 14 },
1267 { ICE_PROTOCOL_LAST, 0 },
1270 ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
1271 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1272 0x00, 0x00, 0x00, 0x00,
1273 0x00, 0x00, 0x00, 0x00,
1275 0x08, 0x00, /* ICE_ETYPE_OL 12 */
1277 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1278 0x00, 0x00, 0x40, 0x00,
1279 0x40, 0x73, 0x00, 0x00,
1280 0x00, 0x00, 0x00, 0x00,
1281 0x00, 0x00, 0x00, 0x00,
1283 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1284 0x00, 0x00, 0x00, 0x00,
1285 0x00, 0x00, 0x00, 0x00,
1286 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1289 ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
1290 { ICE_MAC_OFOS, 0 },
1291 { ICE_ETYPE_OL, 12 },
1292 { ICE_IPV6_OFOS, 14 },
1294 { ICE_PROTOCOL_LAST, 0 },
1297 ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
1298 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1299 0x00, 0x00, 0x00, 0x00,
1300 0x00, 0x00, 0x00, 0x00,
1302 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
1304 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1305 0x00, 0x0c, 0x73, 0x40,
1306 0x00, 0x00, 0x00, 0x00,
1307 0x00, 0x00, 0x00, 0x00,
1308 0x00, 0x00, 0x00, 0x00,
1309 0x00, 0x00, 0x00, 0x00,
1310 0x00, 0x00, 0x00, 0x00,
1311 0x00, 0x00, 0x00, 0x00,
1312 0x00, 0x00, 0x00, 0x00,
1313 0x00, 0x00, 0x00, 0x00,
1315 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1316 0x00, 0x00, 0x00, 0x00,
1317 0x00, 0x00, 0x00, 0x00,
1318 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1321 static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
1322 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
1324 ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
1325 ICE_PKT_OUTER_IPV6 |
1326 ICE_PKT_INNER_IPV6 |
1328 ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
1329 ICE_PKT_OUTER_IPV6 |
1330 ICE_PKT_INNER_IPV6),
1331 ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
1332 ICE_PKT_OUTER_IPV6 |
1334 ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
1335 ICE_PKT_OUTER_IPV6),
1336 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
1337 ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
1338 ICE_PKT_INNER_IPV6 |
1340 ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
1341 ICE_PKT_INNER_IPV6),
1342 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
1344 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
1345 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
1346 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
1347 ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
1349 ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
1350 ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
1351 ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
1352 ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
1354 ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
1355 ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
1356 ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
1357 ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
1358 ICE_PKT_INNER_IPV6 |
1360 ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
1361 ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
1362 ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
1363 ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
1364 ICE_PKT_INNER_IPV6),
1365 ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
1366 ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
1367 ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
1368 ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
1369 ICE_PKT_PROFILE(tcp, 0),
/* this is a recipe to profile association bitmap
 * (filled in from FW by ice_get_recp_to_prof_map() — presumably cached
 * for the lifetime of the function/PF; confirm locking with callers)
 */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap (inverse of the above) */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);
1381 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1382 * @hw: pointer to the HW struct
1384 * Allocate memory for the entire recipe table and initialize the structures/
1385 * entries corresponding to basic recipes.
1387 int ice_init_def_sw_recp(struct ice_hw *hw)
1389 struct ice_sw_recipe *recps;
1392 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1393 sizeof(*recps), GFP_KERNEL);
1397 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1398 recps[i].root_rid = i;
1399 INIT_LIST_HEAD(&recps[i].filt_rules);
1400 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1401 INIT_LIST_HEAD(&recps[i].rg_list);
1402 mutex_init(&recps[i].filt_rule_lock);
1405 hw->switch_info->recp_list = recps;
1411 * ice_aq_get_sw_cfg - get switch configuration
1412 * @hw: pointer to the hardware structure
1413 * @buf: pointer to the result buffer
1414 * @buf_size: length of the buffer available for response
1415 * @req_desc: pointer to requested descriptor
1416 * @num_elems: pointer to number of elements
1417 * @cd: pointer to command details structure or NULL
1419 * Get switch configuration (0x0200) to be placed in buf.
1420 * This admin command returns information such as initial VSI/port number
1421 * and switch ID it belongs to.
1423 * NOTE: *req_desc is both an input/output parameter.
1424 * The caller of this function first calls this function with *request_desc set
1425 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1426 * configuration information has been returned; if non-zero (meaning not all
1427 * the information was returned), the caller should call this function again
1428 * with *req_desc set to the previous value returned by f/w to get the
1429 * next block of switch configuration information.
1431 * *num_elems is output only parameter. This reflects the number of elements
1432 * in response buffer. The caller of this function to use *num_elems while
1433 * parsing the response buffer.
1436 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1437 u16 buf_size, u16 *req_desc, u16 *num_elems,
1438 struct ice_sq_cd *cd)
1440 struct ice_aqc_get_sw_cfg *cmd;
1441 struct ice_aq_desc desc;
1444 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1445 cmd = &desc.params.get_sw_conf;
1446 cmd->element = cpu_to_le16(*req_desc);
1448 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1450 *req_desc = le16_to_cpu(cmd->element);
1451 *num_elems = le16_to_cpu(cmd->num_elems);
1459 * @hw: pointer to the HW struct
1460 * @vsi_ctx: pointer to a VSI context struct
1461 * @cd: pointer to command details structure or NULL
1463 * Add a VSI context to the hardware (0x0210)
1466 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1467 struct ice_sq_cd *cd)
1469 struct ice_aqc_add_update_free_vsi_resp *res;
1470 struct ice_aqc_add_get_update_free_vsi *cmd;
1471 struct ice_aq_desc desc;
1474 cmd = &desc.params.vsi_cmd;
1475 res = &desc.params.add_update_free_vsi_res;
1477 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1479 if (!vsi_ctx->alloc_from_pool)
1480 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1481 ICE_AQ_VSI_IS_VALID);
1482 cmd->vf_id = vsi_ctx->vf_num;
1484 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1486 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1488 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1489 sizeof(vsi_ctx->info), cd);
1492 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1493 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1494 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1502 * @hw: pointer to the HW struct
1503 * @vsi_ctx: pointer to a VSI context struct
1504 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1505 * @cd: pointer to command details structure or NULL
1507 * Free VSI context info from hardware (0x0213)
1510 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1511 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1513 struct ice_aqc_add_update_free_vsi_resp *resp;
1514 struct ice_aqc_add_get_update_free_vsi *cmd;
1515 struct ice_aq_desc desc;
1518 cmd = &desc.params.vsi_cmd;
1519 resp = &desc.params.add_update_free_vsi_res;
1521 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1523 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1525 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1527 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1529 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1530 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1538 * @hw: pointer to the HW struct
1539 * @vsi_ctx: pointer to a VSI context struct
1540 * @cd: pointer to command details structure or NULL
1542 * Update VSI context in the hardware (0x0211)
1545 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1546 struct ice_sq_cd *cd)
1548 struct ice_aqc_add_update_free_vsi_resp *resp;
1549 struct ice_aqc_add_get_update_free_vsi *cmd;
1550 struct ice_aq_desc desc;
1553 cmd = &desc.params.vsi_cmd;
1554 resp = &desc.params.add_update_free_vsi_res;
1556 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1558 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1560 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1562 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1563 sizeof(vsi_ctx->info), cd);
1566 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1567 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1574 * ice_is_vsi_valid - check whether the VSI is valid or not
1575 * @hw: pointer to the HW struct
1576 * @vsi_handle: VSI handle
1578 * check whether the VSI is valid or not
1580 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1582 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1586 * ice_get_hw_vsi_num - return the HW VSI number
1587 * @hw: pointer to the HW struct
1588 * @vsi_handle: VSI handle
1590 * return the HW VSI number
1591 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1593 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1595 return hw->vsi_ctx[vsi_handle]->vsi_num;
1599 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1600 * @hw: pointer to the HW struct
1601 * @vsi_handle: VSI handle
1603 * return the VSI context entry for a given VSI handle
1605 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1607 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1611 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1612 * @hw: pointer to the HW struct
1613 * @vsi_handle: VSI handle
1614 * @vsi: VSI context pointer
1616 * save the VSI context entry for a given VSI handle
1619 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1621 hw->vsi_ctx[vsi_handle] = vsi;
1625 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1626 * @hw: pointer to the HW struct
1627 * @vsi_handle: VSI handle
1629 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1631 struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
1636 ice_for_each_traffic_class(i) {
1637 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1638 vsi->lan_q_ctx[i] = NULL;
1639 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1640 vsi->rdma_q_ctx[i] = NULL;
1645 * ice_clear_vsi_ctx - clear the VSI context entry
1646 * @hw: pointer to the HW struct
1647 * @vsi_handle: VSI handle
1649 * clear the VSI context entry
1651 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1653 struct ice_vsi_ctx *vsi;
1655 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1657 ice_clear_vsi_q_ctx(hw, vsi_handle);
1658 devm_kfree(ice_hw_to_dev(hw), vsi);
1659 hw->vsi_ctx[vsi_handle] = NULL;
1664 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1665 * @hw: pointer to the HW struct
1667 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1671 for (i = 0; i < ICE_MAX_VSI; i++)
1672 ice_clear_vsi_ctx(hw, i);
1676 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1677 * @hw: pointer to the HW struct
1678 * @vsi_handle: unique VSI handle provided by drivers
1679 * @vsi_ctx: pointer to a VSI context struct
1680 * @cd: pointer to command details structure or NULL
1682 * Add a VSI context to the hardware also add it into the VSI handle list.
1683 * If this function gets called after reset for existing VSIs then update
1684 * with the new HW VSI number in the corresponding VSI handle list entry.
1687 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1688 struct ice_sq_cd *cd)
1690 struct ice_vsi_ctx *tmp_vsi_ctx;
1693 if (vsi_handle >= ICE_MAX_VSI)
1695 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1698 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1700 /* Create a new VSI context */
1701 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1702 sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1704 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1707 *tmp_vsi_ctx = *vsi_ctx;
1708 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1710 /* update with new HW VSI num */
1711 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1718 * ice_free_vsi- free VSI context from hardware and VSI handle list
1719 * @hw: pointer to the HW struct
1720 * @vsi_handle: unique VSI handle
1721 * @vsi_ctx: pointer to a VSI context struct
1722 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1723 * @cd: pointer to command details structure or NULL
1725 * Free VSI context info from hardware as well as from VSI handle list
1728 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1729 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1733 if (!ice_is_vsi_valid(hw, vsi_handle))
1735 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1736 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1738 ice_clear_vsi_ctx(hw, vsi_handle);
1744 * @hw: pointer to the HW struct
1745 * @vsi_handle: unique VSI handle
1746 * @vsi_ctx: pointer to a VSI context struct
1747 * @cd: pointer to command details structure or NULL
1749 * Update VSI context in the hardware
1752 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1753 struct ice_sq_cd *cd)
1755 if (!ice_is_vsi_valid(hw, vsi_handle))
1757 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1758 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1762 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1763 * @hw: pointer to HW struct
1764 * @vsi_handle: VSI SW index
1765 * @enable: boolean for enable/disable
1768 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1770 struct ice_vsi_ctx *ctx, *cached_ctx;
1773 cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1777 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1781 ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
1782 ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
1783 ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
1785 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1788 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1790 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1792 status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
1794 cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
1795 cached_ctx->info.valid_sections |= ctx->info.valid_sections;
1803 * ice_aq_alloc_free_vsi_list
1804 * @hw: pointer to the HW struct
1805 * @vsi_list_id: VSI list ID returned or used for lookup
1806 * @lkup_type: switch rule filter lookup type
1807 * @opc: switch rules population command type - pass in the command opcode
1809 * allocates or free a VSI list resource
1812 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1813 enum ice_sw_lkup_type lkup_type,
1814 enum ice_adminq_opc opc)
1816 struct ice_aqc_alloc_free_res_elem *sw_buf;
1817 struct ice_aqc_res_elem *vsi_ele;
1821 buf_len = struct_size(sw_buf, elem, 1);
1822 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1825 sw_buf->num_elems = cpu_to_le16(1);
1827 if (lkup_type == ICE_SW_LKUP_MAC ||
1828 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1829 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1830 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1831 lkup_type == ICE_SW_LKUP_PROMISC ||
1832 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1833 lkup_type == ICE_SW_LKUP_DFLT) {
1834 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1835 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1836 if (opc == ice_aqc_opc_alloc_res)
1838 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE |
1839 ICE_AQC_RES_TYPE_FLAG_SHARED);
1842 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1845 goto ice_aq_alloc_free_vsi_list_exit;
1848 if (opc == ice_aqc_opc_free_res)
1849 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1851 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1853 goto ice_aq_alloc_free_vsi_list_exit;
1855 if (opc == ice_aqc_opc_alloc_res) {
1856 vsi_ele = &sw_buf->elem[0];
1857 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1860 ice_aq_alloc_free_vsi_list_exit:
1861 devm_kfree(ice_hw_to_dev(hw), sw_buf);
1866 * ice_aq_sw_rules - add/update/remove switch rules
1867 * @hw: pointer to the HW struct
1868 * @rule_list: pointer to switch rule population list
1869 * @rule_list_sz: total size of the rule list in bytes
1870 * @num_rules: number of switch rules in the rule_list
1871 * @opc: switch rules population command type - pass in the command opcode
1872 * @cd: pointer to command details structure or NULL
1874 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1877 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1878 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1880 struct ice_aq_desc desc;
1883 if (opc != ice_aqc_opc_add_sw_rules &&
1884 opc != ice_aqc_opc_update_sw_rules &&
1885 opc != ice_aqc_opc_remove_sw_rules)
1888 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1890 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1891 desc.params.sw_rules.num_rules_fltr_entry_index =
1892 cpu_to_le16(num_rules);
1893 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1894 if (opc != ice_aqc_opc_add_sw_rules &&
1895 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1902 * ice_aq_add_recipe - add switch recipe
1903 * @hw: pointer to the HW struct
1904 * @s_recipe_list: pointer to switch rule population list
1905 * @num_recipes: number of switch recipes in the list
1906 * @cd: pointer to command details structure or NULL
1911 ice_aq_add_recipe(struct ice_hw *hw,
1912 struct ice_aqc_recipe_data_elem *s_recipe_list,
1913 u16 num_recipes, struct ice_sq_cd *cd)
1915 struct ice_aqc_add_get_recipe *cmd;
1916 struct ice_aq_desc desc;
1919 cmd = &desc.params.add_get_recipe;
1920 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1922 cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1923 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1925 buf_size = num_recipes * sizeof(*s_recipe_list);
1927 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1931 * ice_aq_get_recipe - get switch recipe
1932 * @hw: pointer to the HW struct
1933 * @s_recipe_list: pointer to switch rule population list
1934 * @num_recipes: pointer to the number of recipes (input and output)
1935 * @recipe_root: root recipe number of recipe(s) to retrieve
1936 * @cd: pointer to command details structure or NULL
1940 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1941 * On output, *num_recipes will equal the number of entries returned in
1944 * The caller must supply enough space in s_recipe_list to hold all possible
1945 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1948 ice_aq_get_recipe(struct ice_hw *hw,
1949 struct ice_aqc_recipe_data_elem *s_recipe_list,
1950 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1952 struct ice_aqc_add_get_recipe *cmd;
1953 struct ice_aq_desc desc;
1957 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1960 cmd = &desc.params.add_get_recipe;
1961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1963 cmd->return_index = cpu_to_le16(recipe_root);
1964 cmd->num_sub_recipes = 0;
1966 buf_size = *num_recipes * sizeof(*s_recipe_list);
1968 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1969 *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1975 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1976 * @hw: pointer to the HW struct
1977 * @params: parameters used to update the default recipe
1979 * This function only supports updating default recipes and it only supports
1980 * updating a single recipe based on the lkup_idx at a time.
1982 * This is done as a read-modify-write operation. First, get the current recipe
1983 * contents based on the recipe's ID. Then modify the field vector index and
1984 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1985 * the pre-existing recipe with the modifications.
1988 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1989 struct ice_update_recipe_lkup_idx_params *params)
1991 struct ice_aqc_recipe_data_elem *rcp_list;
1992 u16 num_recps = ICE_MAX_NUM_RECIPES;
1995 rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1999 /* read current recipe list from firmware */
2000 rcp_list->recipe_indx = params->rid;
2001 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
2003 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
2004 params->rid, status);
2008 /* only modify existing recipe's lkup_idx and mask if valid, while
2009 * leaving all other fields the same, then update the recipe firmware
2011 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
2012 if (params->mask_valid)
2013 rcp_list->content.mask[params->lkup_idx] =
2014 cpu_to_le16(params->mask);
2016 if (params->ignore_valid)
2017 rcp_list->content.lkup_indx[params->lkup_idx] |=
2018 ICE_AQ_RECIPE_LKUP_IGNORE;
2020 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
2022 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
2023 params->rid, params->lkup_idx, params->fv_idx,
2024 params->mask, params->mask_valid ? "true" : "false",
2033 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2034 * @hw: pointer to the HW struct
2035 * @profile_id: package profile ID to associate the recipe with
2036 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2037 * @cd: pointer to command details structure or NULL
2038 * Recipe to profile association (0x0291)
2041 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2042 struct ice_sq_cd *cd)
2044 struct ice_aqc_recipe_to_profile *cmd;
2045 struct ice_aq_desc desc;
2047 cmd = &desc.params.recipe_to_profile;
2048 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2049 cmd->profile_id = cpu_to_le16(profile_id);
2050 /* Set the recipe ID bit in the bitmask to let the device know which
2051 * profile we are associating the recipe to
2053 memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
2055 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2059 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2060 * @hw: pointer to the HW struct
2061 * @profile_id: package profile ID to associate the recipe with
2062 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2063 * @cd: pointer to command details structure or NULL
2064 * Associate profile ID with given recipe (0x0293)
2067 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2068 struct ice_sq_cd *cd)
2070 struct ice_aqc_recipe_to_profile *cmd;
2071 struct ice_aq_desc desc;
2074 cmd = &desc.params.recipe_to_profile;
2075 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2076 cmd->profile_id = cpu_to_le16(profile_id);
2078 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2080 memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2086 * ice_alloc_recipe - add recipe resource
2087 * @hw: pointer to the hardware structure
2088 * @rid: recipe ID returned as response to AQ call
2090 int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2092 struct ice_aqc_alloc_free_res_elem *sw_buf;
2096 buf_len = struct_size(sw_buf, elem, 1);
2097 sw_buf = kzalloc(buf_len, GFP_KERNEL);
2101 sw_buf->num_elems = cpu_to_le16(1);
2102 sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2103 ICE_AQC_RES_TYPE_S) |
2104 ICE_AQC_RES_TYPE_FLAG_SHARED);
2105 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2106 ice_aqc_opc_alloc_res, NULL);
2108 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2115 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2116 * @hw: pointer to hardware structure
2118 * This function is used to populate recipe_to_profile matrix where index to
2119 * this array is the recipe ID and the element is the mapping of which profiles
2120 * is this recipe mapped to.
2122 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2124 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2127 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2130 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2131 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2132 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2134 bitmap_copy(profile_to_recipe[i], r_bitmap,
2135 ICE_MAX_NUM_RECIPES);
2136 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2137 set_bit(i, recipe_to_profile[j]);
2142 * ice_collect_result_idx - copy result index values
2143 * @buf: buffer that contains the result index
2144 * @recp: the recipe struct to copy data into
2147 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2148 struct ice_sw_recipe *recp)
2150 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2151 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2156 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2157 * @hw: pointer to hardware structure
2158 * @recps: struct that we need to populate
2159 * @rid: recipe ID that we are populating
2160 * @refresh_required: true if we should get recipe to profile mapping from FW
2162 * This function is used to populate all the necessary entries into our
2163 * bookkeeping so that we have a current list of all the recipes that are
2164 * programmed in the firmware.
2167 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2168 bool *refresh_required)
2170 DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
2171 struct ice_aqc_recipe_data_elem *tmp;
2172 u16 num_recps = ICE_MAX_NUM_RECIPES;
2173 struct ice_prot_lkup_ext *lkup_exts;
2178 bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
2180 /* we need a buffer big enough to accommodate all the recipes */
2181 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
2185 tmp[0].recipe_indx = rid;
2186 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2187 /* non-zero status meaning recipe doesn't exist */
2191 /* Get recipe to profile map so that we can get the fv from lkups that
2192 * we read for a recipe from FW. Since we want to minimize the number of
2193 * times we make this FW call, just make one call and cache the copy
2194 * until a new recipe is added. This operation is only required the
2195 * first time to get the changes from FW. Then to search existing
2196 * entries we don't need to update the cache again until another recipe
2199 if (*refresh_required) {
2200 ice_get_recp_to_prof_map(hw);
2201 *refresh_required = false;
2204 /* Start populating all the entries for recps[rid] based on lkups from
2205 * firmware. Note that we are only creating the root recipe in our
2208 lkup_exts = &recps[rid].lkup_exts;
2210 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2211 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2212 struct ice_recp_grp_entry *rg_entry;
2213 u8 i, prof, idx, prot = 0;
2217 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
2224 idx = root_bufs.recipe_indx;
2225 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2227 /* Mark all result indices in this chain */
2228 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2229 set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2232 /* get the first profile that is associated with rid */
2233 prof = find_first_bit(recipe_to_profile[idx],
2234 ICE_MAX_NUM_PROFILES);
2235 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2236 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2238 rg_entry->fv_idx[i] = lkup_indx;
2239 rg_entry->fv_mask[i] =
2240 le16_to_cpu(root_bufs.content.mask[i + 1]);
2242 /* If the recipe is a chained recipe then all its
2243 * child recipe's result will have a result index.
2244 * To fill fv_words we should not use those result
2245 * index, we only need the protocol ids and offsets.
2246 * We will skip all the fv_idx which stores result
2247 * index in them. We also need to skip any fv_idx which
2248 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2249 * valid offset value.
2251 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2252 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2253 rg_entry->fv_idx[i] == 0)
2256 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2257 rg_entry->fv_idx[i], &prot, &off);
2258 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2259 lkup_exts->fv_words[fv_word_idx].off = off;
2260 lkup_exts->field_mask[fv_word_idx] =
2261 rg_entry->fv_mask[i];
2264 /* populate rg_list with the data from the child entry of this
2267 list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2269 /* Propagate some data to the recipe database */
2270 recps[idx].is_root = !!is_root;
2271 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2272 recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
2273 ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
2274 recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
2275 ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
2276 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2277 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2278 recps[idx].chain_idx = root_bufs.content.result_indx &
2279 ~ICE_AQ_RECIPE_RESULT_EN;
2280 set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2282 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2288 /* Only do the following for root recipes entries */
2289 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2290 sizeof(recps[idx].r_bitmap));
2291 recps[idx].root_rid = root_bufs.content.rid &
2292 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2293 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2296 /* Complete initialization of the root recipe entry */
2297 lkup_exts->n_val_words = fv_word_idx;
2298 recps[rid].big_recp = (num_recps > 1);
2299 recps[rid].n_grp_count = (u8)num_recps;
2300 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2301 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2303 if (!recps[rid].root_buf) {
2308 /* Copy result indexes */
2309 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2310 recps[rid].recp_created = true;
2317 /* ice_init_port_info - Initialize port_info with switch configuration data
2318 * @pi: pointer to port_info
2319 * @vsi_port_num: VSI number or port number
2320 * @type: Type of switch element (port or VSI)
2321 * @swid: switch ID of the switch the element is attached to
2322 * @pf_vf_num: PF or VF number
2323 * @is_vf: true if the element is a VF, false otherwise
2326 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2327 u16 swid, u16 pf_vf_num, bool is_vf)
2330 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2331 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2333 pi->pf_vf_num = pf_vf_num;
2337 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2342 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2343 * @hw: pointer to the hardware structure
2345 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2347 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2353 rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2357 /* Multiple calls to ice_aq_get_sw_cfg may be required
2358 * to get all the switch configuration information. The need
2359 * for additional calls is indicated by ice_aq_get_sw_cfg
2360 * writing a non-zero value in req_desc
2363 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2365 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2366 &req_desc, &num_elems, NULL);
2371 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2372 u16 pf_vf_num, swid, vsi_port_num;
2376 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2377 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2379 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2380 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2382 swid = le16_to_cpu(ele->swid);
2384 if (le16_to_cpu(ele->pf_vf_num) &
2385 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2388 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2389 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2391 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2392 /* FW VSI is not needed. Just continue. */
2396 ice_init_port_info(hw->port_info, vsi_port_num,
2397 res_type, swid, pf_vf_num, is_vf);
2399 } while (req_desc && !status);
2406 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2407 * @hw: pointer to the hardware structure
2408 * @fi: filter info structure to fill/update
2410 * This helper function populates the lb_en and lan_en elements of the provided
2411 * ice_fltr_info struct using the switch's type and characteristics of the
2412 * switch rule being configured.
2414 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2418 if ((fi->flag & ICE_FLTR_TX) &&
2419 (fi->fltr_act == ICE_FWD_TO_VSI ||
2420 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2421 fi->fltr_act == ICE_FWD_TO_Q ||
2422 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2423 /* Setting LB for prune actions will result in replicated
2424 * packets to the internal switch that will be dropped.
 */
/* NOTE(review): the lb_en assignment guarded by this check is on an
 * elided line of this sampled chunk — confirm against full source.
 */
2426 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2429 /* Set lan_en to TRUE if
2430 * 1. The switch is a VEB AND
2432 * 2.1 The lookup is a directional lookup like ethertype,
2433 * promiscuous, ethertype-MAC, promiscuous-VLAN
2434 * and default-port OR
2435 * 2.2 The lookup is VLAN, OR
2436 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2437 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2441 * The switch is a VEPA.
2443 * In all other cases, the LAN enable has to be set to false.
 */
/* This condition encodes cases 2.1-2.4 above: directional lookup
 * types, VLAN, and MAC/MAC_VLAN lookups with a non-unicast address.
 */
2446 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2447 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2448 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2449 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2450 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2451 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2452 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2453 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2454 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2455 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2464 * ice_fill_sw_rule - Helper function to fill switch rule structure
2465 * @hw: pointer to the hardware structure
2466 * @f_info: entry containing packet forwarding information
2467 * @s_rule: switch rule structure to be filled in based on mac_entry
2468 * @opc: switch rules population command type - pass in the command opcode
2471 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2472 struct ice_sw_rule_lkup_rx_tx *s_rule,
2473 enum ice_adminq_opc opc)
/* Sentinel: any value above ICE_MAX_VLAN_ID means "no VLAN ID to
 * program"; checked before the TCI/TPID write near the end.
 */
2475 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2476 u16 vlan_tpid = ETH_P_8021Q;
/* A remove only needs the rule index; no header data is programmed. */
2484 if (opc == ice_aqc_opc_remove_sw_rules) {
2486 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2487 s_rule->hdr_len = 0;
2491 eth_hdr_sz = sizeof(dummy_eth_header);
2492 eth_hdr = s_rule->hdr_data;
2494 /* initialize the ether header with a dummy header */
2495 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2496 ice_fill_sw_info(hw, f_info);
/* Build the single-action word from the forwarding action type. */
2498 switch (f_info->fltr_act) {
2499 case ICE_FWD_TO_VSI:
2500 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2501 ICE_SINGLE_ACT_VSI_ID_M;
2502 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2503 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2504 ICE_SINGLE_ACT_VALID_BIT;
2506 case ICE_FWD_TO_VSI_LIST:
2507 act |= ICE_SINGLE_ACT_VSI_LIST;
2508 act |= (f_info->fwd_id.vsi_list_id <<
2509 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2510 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2511 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2512 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2513 ICE_SINGLE_ACT_VALID_BIT;
2516 act |= ICE_SINGLE_ACT_TO_Q;
2517 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2518 ICE_SINGLE_ACT_Q_INDEX_M;
2520 case ICE_DROP_PACKET:
2521 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2522 ICE_SINGLE_ACT_VALID_BIT;
2524 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as a power of two. */
2525 q_rgn = f_info->qgrp_size > 0 ?
2526 (u8)ilog2(f_info->qgrp_size) : 0;
2527 act |= ICE_SINGLE_ACT_TO_Q;
2528 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2529 ICE_SINGLE_ACT_Q_INDEX_M;
2530 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2531 ICE_SINGLE_ACT_Q_REGION_M;
2538 act |= ICE_SINGLE_ACT_LB_ENABLE;
2540 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick up the per-lookup-type match data (DA, VLAN ID, TPID, etype). */
2542 switch (f_info->lkup_type) {
2543 case ICE_SW_LKUP_MAC:
2544 daddr = f_info->l_data.mac.mac_addr;
2546 case ICE_SW_LKUP_VLAN:
2547 vlan_id = f_info->l_data.vlan.vlan_id;
2548 if (f_info->l_data.vlan.tpid_valid)
2549 vlan_tpid = f_info->l_data.vlan.tpid;
2550 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2551 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2552 act |= ICE_SINGLE_ACT_PRUNE;
2553 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2556 case ICE_SW_LKUP_ETHERTYPE_MAC:
2557 daddr = f_info->l_data.ethertype_mac.mac_addr;
2559 case ICE_SW_LKUP_ETHERTYPE:
2560 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2561 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2563 case ICE_SW_LKUP_MAC_VLAN:
2564 daddr = f_info->l_data.mac_vlan.mac_addr;
2565 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2567 case ICE_SW_LKUP_PROMISC_VLAN:
2568 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2570 case ICE_SW_LKUP_PROMISC:
2571 daddr = f_info->l_data.mac_vlan.mac_addr;
2577 s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2578 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2579 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2581 /* Recipe set depending on lookup type */
2582 s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2583 s_rule->src = cpu_to_le16(f_info->src);
2584 s_rule->act = cpu_to_le32(act);
2587 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
/* Program VLAN TCI and TPID only when a real VLAN ID was set above. */
2589 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2590 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2591 *off = cpu_to_be16(vlan_id);
2592 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2593 *off = cpu_to_be16(vlan_tpid);
2596 /* Create the switch rule with the final dummy Ethernet header */
2597 if (opc != ice_aqc_opc_update_sw_rules)
2598 s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2602 * ice_add_marker_act
2603 * @hw: pointer to the hardware structure
2604 * @m_ent: the management entry for which sw marker needs to be added
2605 * @sw_marker: sw marker to tag the Rx descriptor with
2606 * @l_id: large action resource ID
2608 * Create a large action to hold software marker and update the switch rule
2609 * entry pointed by m_ent with newly created large action
2612 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2613 u16 sw_marker, u16 l_id)
2615 struct ice_sw_rule_lkup_rx_tx *rx_tx;
2616 struct ice_sw_rule_lg_act *lg_act;
2617 /* For software marker we need 3 large actions
2618 * 1. FWD action: FWD TO VSI or VSI LIST
2619 * 2. GENERIC VALUE action to hold the profile ID
2620 * 3. GENERIC VALUE action to hold the software marker ID
 */
2622 const u16 num_lg_acts = 3;
/* Software markers are only supported on MAC lookup rules. */
2629 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2632 /* Create two back-to-back switch rules and submit them to the HW using
2633 * one memory buffer:
 */
2637 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2638 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2639 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
/* The lookup rule immediately follows the large action in the buffer. */
2643 rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2645 /* Fill in the first switch rule i.e. large action */
2646 lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2647 lg_act->index = cpu_to_le16(l_id);
2648 lg_act->size = cpu_to_le16(num_lg_acts);
2650 /* First action VSI forwarding or VSI list forwarding depending on how
 * many VSIs reference this entry.
 */
2653 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2654 m_ent->fltr_info.fwd_id.hw_vsi_id;
2656 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2657 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
2658 if (m_ent->vsi_count > 1)
2659 act |= ICE_LG_ACT_VSI_LIST;
2660 lg_act->act[0] = cpu_to_le32(act);
2662 /* Second action descriptor type */
2663 act = ICE_LG_ACT_GENERIC;
2665 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2666 lg_act->act[1] = cpu_to_le32(act);
2668 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2669 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2671 /* Third action Marker value */
2672 act |= ICE_LG_ACT_GENERIC;
2673 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2674 ICE_LG_ACT_GENERIC_VALUE_M;
2676 lg_act->act[2] = cpu_to_le32(act);
2678 /* call the fill switch rule to fill the lookup Tx Rx structure */
2679 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2680 ice_aqc_opc_update_sw_rules);
2682 /* Update the action to point to the large action ID */
2683 rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
2684 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2685 ICE_SINGLE_ACT_PTR_VAL_M));
2687 /* Use the filter rule ID of the previously created rule with single
2688 * act. Once the update happens, hardware will treat this as large
 * action.
 */
2691 rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
2693 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2694 ice_aqc_opc_update_sw_rules, NULL);
2696 m_ent->lg_act_idx = l_id;
2697 m_ent->sw_marker_id = sw_marker;
2700 devm_kfree(ice_hw_to_dev(hw), lg_act);
2705 * ice_create_vsi_list_map
2706 * @hw: pointer to the hardware structure
2707 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2708 * @num_vsi: number of VSI handles in the array
2709 * @vsi_list_id: VSI list ID generated as part of allocate resource
2711 * Helper function to create a new entry of VSI list ID to VSI mapping
2712 * using the given VSI list ID
2714 static struct ice_vsi_list_map_info *
2715 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2718 struct ice_switch_info *sw = hw->switch_info;
2719 struct ice_vsi_list_map_info *v_map;
2722 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2726 v_map->vsi_list_id = vsi_list_id;
/* Record each handle in the map's bitmap for later membership tests. */
2728 for (i = 0; i < num_vsi; i++)
2729 set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new mapping on the switch-wide list. */
2731 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2736 * ice_update_vsi_list_rule
2737 * @hw: pointer to the hardware structure
2738 * @vsi_handle_arr: array of VSI handles to form a VSI list
2739 * @num_vsi: number of VSI handles in the array
2740 * @vsi_list_id: VSI list ID generated as part of allocate resource
2741 * @remove: Boolean value to indicate if this is a remove action
2742 * @opc: switch rules population command type - pass in the command opcode
2743 * @lkup_type: lookup type of the filter
2745 * Call AQ command to add a new switch rule or update existing switch rule
2746 * using the given VSI list ID
2749 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2750 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2751 enum ice_sw_lkup_type lkup_type)
2753 struct ice_sw_rule_vsi_list *s_rule;
/* Non-VLAN lookup types use a plain VSI list; VLAN uses a prune list. */
2762 if (lkup_type == ICE_SW_LKUP_MAC ||
2763 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2764 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2765 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2766 lkup_type == ICE_SW_LKUP_PROMISC ||
2767 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2768 lkup_type == ICE_SW_LKUP_DFLT)
2769 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2770 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2771 else if (lkup_type == ICE_SW_LKUP_VLAN)
2772 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2773 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2777 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2778 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2781 for (i = 0; i < num_vsi; i++) {
2782 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2786 /* AQ call requires hw_vsi_id(s) */
2788 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2791 s_rule->hdr.type = cpu_to_le16(rule_type);
2792 s_rule->number_vsi = cpu_to_le16(num_vsi);
2793 s_rule->index = cpu_to_le16(vsi_list_id);
2795 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2798 devm_kfree(ice_hw_to_dev(hw), s_rule);
2803 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2804 * @hw: pointer to the HW struct
2805 * @vsi_handle_arr: array of VSI handles to form a VSI list
2806 * @num_vsi: number of VSI handles in the array
2807 * @vsi_list_id: stores the ID of the VSI list to be created
2808 * @lkup_type: switch rule filter's lookup type
2811 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2812 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
/* First allocate the VSI list resource from FW... */
2816 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2817 ice_aqc_opc_alloc_res);
2821 /* Update the newly created VSI list to include the specified VSIs */
2822 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2823 *vsi_list_id, false,
2824 ice_aqc_opc_add_sw_rules, lkup_type);
2828 * ice_create_pkt_fwd_rule
2829 * @hw: pointer to the hardware structure
2830 * @f_entry: entry containing packet forwarding information
2832 * Create switch rule with given filter information and add an entry
2833 * to the corresponding filter management list to track this switch rule
2837 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2838 struct ice_fltr_list_entry *f_entry)
2840 struct ice_fltr_mgmt_list_entry *fm_entry;
2841 struct ice_sw_rule_lkup_rx_tx *s_rule;
2842 enum ice_sw_lkup_type l_type;
2843 struct ice_sw_recipe *recp;
2846 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2847 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2851 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2855 goto ice_create_pkt_fwd_rule_exit;
2858 fm_entry->fltr_info = f_entry->fltr_info;
2860 /* Initialize all the fields for the management entry */
2861 fm_entry->vsi_count = 1;
2862 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2863 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2864 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2866 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2867 ice_aqc_opc_add_sw_rules);
2869 status = ice_aq_sw_rules(hw, s_rule,
2870 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2871 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure drop the tracking entry; s_rule is freed at exit. */
2873 devm_kfree(ice_hw_to_dev(hw), fm_entry);
2874 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule ID in the rule's index field. */
2877 f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2878 fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2880 /* The book keeping entries will get removed when base driver
2881 * calls remove filter AQ command
 */
2883 l_type = fm_entry->fltr_info.lkup_type;
2884 recp = &hw->switch_info->recp_list[l_type];
2885 list_add(&fm_entry->list_entry, &recp->filt_rules);
2887 ice_create_pkt_fwd_rule_exit:
2888 devm_kfree(ice_hw_to_dev(hw), s_rule);
2893 * ice_update_pkt_fwd_rule
2894 * @hw: pointer to the hardware structure
2895 * @f_info: filter information for switch rule
2897 * Call AQ command to update a previously created switch rule with a
 * new forwarding action / destination.
2901 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2903 struct ice_sw_rule_lkup_rx_tx *s_rule;
2906 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2907 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2912 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its FW-assigned rule ID. */
2914 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2916 /* Update switch rule with new rule set to forward VSI list */
2917 status = ice_aq_sw_rules(hw, s_rule,
2918 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2919 ice_aqc_opc_update_sw_rules, NULL);
2921 devm_kfree(ice_hw_to_dev(hw), s_rule);
2926 * ice_update_sw_rule_bridge_mode
2927 * @hw: pointer to the HW struct
2929 * Updates unicast switch filter rules based on VEB/VEPA mode
2931 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2933 struct ice_switch_info *sw = hw->switch_info;
2934 struct ice_fltr_mgmt_list_entry *fm_entry;
2935 struct list_head *rule_head;
2936 struct mutex *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC recipe's rule list is affected by bridge mode. */
2939 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2940 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2942 mutex_lock(rule_lock);
2943 list_for_each_entry(fm_entry, rule_head, list_entry) {
2944 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2945 u8 *addr = fi->l_data.mac.mac_addr;
2947 /* Update unicast Tx rules to reflect the selected
 * bridge mode (lan_en/lb_en recomputed by the update path).
 */
2950 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2951 (fi->fltr_act == ICE_FWD_TO_VSI ||
2952 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2953 fi->fltr_act == ICE_FWD_TO_Q ||
2954 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2955 status = ice_update_pkt_fwd_rule(hw, fi);
2961 mutex_unlock(rule_lock);
2967 * ice_add_update_vsi_list
2968 * @hw: pointer to the hardware structure
2969 * @m_entry: pointer to current filter management list entry
2970 * @cur_fltr: filter information from the book keeping entry
2971 * @new_fltr: filter information with the new VSI to be added
2973 * Call AQ command to add or update previously created VSI list with new VSI.
2975 * Helper function to do book keeping associated with adding filter information
2976 * The algorithm to do the book keeping is described below :
2977 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2978 * if only one VSI has been added till now
2979 * Allocate a new VSI list and add two VSIs
2980 * to this list using switch rule command
2981 * Update the previously created switch rule with the
2982 * newly created VSI list ID
2983 * if a VSI list was previously created
2984 * Add the new VSI to the previously created VSI list set
2985 * using the update switch rule command
2988 ice_add_update_vsi_list(struct ice_hw *hw,
2989 struct ice_fltr_mgmt_list_entry *m_entry,
2990 struct ice_fltr_info *cur_fltr,
2991 struct ice_fltr_info *new_fltr)
2993 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot share a rule via a VSI list. */
2996 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2997 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3000 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3001 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3002 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3003 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3006 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3007 /* Only one entry existed in the mapping and it was not already
3008 * a part of a VSI list. So, create a VSI list with the old and
 * new VSIs.
 */
3011 struct ice_fltr_info tmp_fltr;
3012 u16 vsi_handle_arr[2];
3014 /* A rule already exists with the new VSI being added */
3015 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3018 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3019 vsi_handle_arr[1] = new_fltr->vsi_handle;
3020 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3022 new_fltr->lkup_type);
3026 tmp_fltr = *new_fltr;
3027 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3028 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3029 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3030 /* Update the previous switch rule of "MAC forward to VSI" to
3031 * "MAC fwd to VSI list"
 */
3033 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3037 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3038 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3039 m_entry->vsi_list_info =
3040 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3043 if (!m_entry->vsi_list_info)
3046 /* If this entry was large action then the large action needs
3047 * to be updated to point to FWD to VSI list
 */
3049 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3051 ice_add_marker_act(hw, m_entry,
3052 m_entry->sw_marker_id,
3053 m_entry->lg_act_idx);
3055 u16 vsi_handle = new_fltr->vsi_handle;
3056 enum ice_adminq_opc opcode;
3058 if (!m_entry->vsi_list_info)
3061 /* A rule already exists with the new VSI being added */
3062 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
3065 /* Update the previously created VSI list set with
3066 * the new VSI ID passed in
 */
3068 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3069 opcode = ice_aqc_opc_update_sw_rules;
3071 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3072 vsi_list_id, false, opcode,
3073 new_fltr->lkup_type);
3074 /* update VSI list mapping info with new VSI ID */
3076 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
3079 m_entry->vsi_count++;
3084 * ice_find_rule_entry - Search a rule entry
3085 * @hw: pointer to the hardware structure
3086 * @recp_id: lookup type for which the specified rule needs to be searched
3087 * @f_info: rule information
3089 * Helper function to search for a given rule entry
3090 * Returns pointer to entry storing the rule if found
3092 static struct ice_fltr_mgmt_list_entry *
3093 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3095 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3096 struct ice_switch_info *sw = hw->switch_info;
3097 struct list_head *list_head;
3099 list_head = &sw->recp_list[recp_id].filt_rules;
/* A rule matches when its lookup data and direction flag both match. */
3100 list_for_each_entry(list_itr, list_head, list_entry) {
3101 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3102 sizeof(f_info->l_data)) &&
3103 f_info->flag == list_itr->fltr_info.flag) {
3112 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3113 * @hw: pointer to the hardware structure
3114 * @recp_id: lookup type for which VSI lists needs to be searched
3115 * @vsi_handle: VSI handle to be found in VSI list
3116 * @vsi_list_id: VSI list ID found containing vsi_handle
3118 * Helper function to search a VSI list with single entry containing given VSI
3119 * handle element. This can be extended further to search VSI list with more
3120 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3122 struct ice_vsi_list_map_info *
3123 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3126 struct ice_vsi_list_map_info *map_info = NULL;
3127 struct ice_switch_info *sw = hw->switch_info;
3128 struct ice_fltr_mgmt_list_entry *list_itr;
3129 struct list_head *list_head;
3131 list_head = &sw->recp_list[recp_id].filt_rules;
/* Walk every rule of this recipe looking for a VSI list containing
 * the given handle; report its list ID through @vsi_list_id.
 */
3132 list_for_each_entry(list_itr, list_head, list_entry) {
3133 if (list_itr->vsi_list_info) {
3134 map_info = list_itr->vsi_list_info;
3135 if (test_bit(vsi_handle, map_info->vsi_map)) {
3136 *vsi_list_id = map_info->vsi_list_id;
3145 * ice_add_rule_internal - add rule for a given lookup type
3146 * @hw: pointer to the hardware structure
3147 * @recp_id: lookup type (recipe ID) for which rule has to be added
3148 * @f_entry: structure containing MAC forwarding information
3150 * Adds or updates the rule lists for a given recipe
3153 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3154 struct ice_fltr_list_entry *f_entry)
3156 struct ice_switch_info *sw = hw->switch_info;
3157 struct ice_fltr_info *new_fltr, *cur_fltr;
3158 struct ice_fltr_mgmt_list_entry *m_entry;
3159 struct mutex *rule_lock; /* Lock to protect filter rule list */
3162 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3164 f_entry->fltr_info.fwd_id.hw_vsi_id =
3165 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3167 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3169 mutex_lock(rule_lock);
3170 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the port; Tx rules from the HW VSI. */
3171 if (new_fltr->flag & ICE_FLTR_RX)
3172 new_fltr->src = hw->port_info->lport;
3173 else if (new_fltr->flag & ICE_FLTR_TX)
3174 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3176 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
/* No existing rule: create a brand new forwarding rule. */
3178 mutex_unlock(rule_lock);
3179 return ice_create_pkt_fwd_rule(hw, f_entry);
/* Rule exists: merge the new VSI into its VSI list bookkeeping. */
3182 cur_fltr = &m_entry->fltr_info;
3183 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3184 mutex_unlock(rule_lock);
3190 * ice_remove_vsi_list_rule
3191 * @hw: pointer to the hardware structure
3192 * @vsi_list_id: VSI list ID generated as part of allocate resource
3193 * @lkup_type: switch rule filter lookup type
3195 * The VSI list should be emptied before this function is called to remove the
 * VSI list resource.
3199 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3200 enum ice_sw_lkup_type lkup_type)
3202 struct ice_sw_rule_vsi_list *s_rule;
3206 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3207 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3211 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3212 s_rule->index = cpu_to_le16(vsi_list_id);
3214 /* Free the vsi_list resource that we allocated. It is assumed that the
3215 * list is empty at this point.
 */
3217 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3218 ice_aqc_opc_free_res);
3220 devm_kfree(ice_hw_to_dev(hw), s_rule);
3225 * ice_rem_update_vsi_list
3226 * @hw: pointer to the hardware structure
3227 * @vsi_handle: VSI handle of the VSI to remove
3228 * @fm_list: filter management entry for which the VSI list management needs to
 * be done
3232 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3233 struct ice_fltr_mgmt_list_entry *fm_list)
3235 enum ice_sw_lkup_type lkup_type;
3239 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3240 fm_list->vsi_count == 0)
3243 /* A rule with the VSI being removed does not exist */
3244 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3247 lkup_type = fm_list->fltr_info.lkup_type;
3248 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3249 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3250 ice_aqc_opc_update_sw_rules,
3255 fm_list->vsi_count--;
3256 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* For non-VLAN rules with a single VSI left, collapse the VSI-list
 * rule back into a direct FWD_TO_VSI rule for the remaining VSI.
 */
3258 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3259 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3260 struct ice_vsi_list_map_info *vsi_list_info =
3261 fm_list->vsi_list_info;
3264 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3266 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3269 /* Make sure VSI list is empty before removing it below */
3270 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3272 ice_aqc_opc_update_sw_rules,
3277 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3278 tmp_fltr_info.fwd_id.hw_vsi_id =
3279 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3280 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3281 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3283 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3284 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3288 fm_list->fltr_info = tmp_fltr_info;
/* The list itself is torn down at one remaining VSI for non-VLAN
 * rules, or at zero for VLAN (prune) rules.
 */
3291 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3292 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3293 struct ice_vsi_list_map_info *vsi_list_info =
3294 fm_list->vsi_list_info;
3296 /* Remove the VSI list since it is no longer used */
3297 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3299 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3300 vsi_list_id, status);
3304 list_del(&vsi_list_info->list_entry);
3305 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3306 fm_list->vsi_list_info = NULL;
3313 * ice_remove_rule_internal - Remove a filter rule of a given type
3314 * @hw: pointer to the hardware structure
3315 * @recp_id: recipe ID for which the rule needs to removed
3316 * @f_entry: rule entry containing filter information
3319 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3320 struct ice_fltr_list_entry *f_entry)
3322 struct ice_switch_info *sw = hw->switch_info;
3323 struct ice_fltr_mgmt_list_entry *list_elem;
3324 struct mutex *rule_lock; /* Lock to protect filter rule list */
3325 bool remove_rule = false;
3329 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3331 f_entry->fltr_info.fwd_id.hw_vsi_id =
3332 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3334 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3335 mutex_lock(rule_lock);
3336 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
/* Direct (non-VSI-list) rules can be removed outright. */
3342 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3344 } else if (!list_elem->vsi_list_info) {
3347 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3348 /* a ref_cnt > 1 indicates that the vsi_list is being
3349 * shared by multiple rules. Decrement the ref_cnt and
3350 * remove this rule, but do not modify the list, as it
3351 * is in-use by other rules.
 */
3353 list_elem->vsi_list_info->ref_cnt--;
3356 /* a ref_cnt of 1 indicates the vsi_list is only used
3357 * by one rule. However, the original removal request is only
3358 * for a single VSI. Update the vsi_list first, and only
3359 * remove the rule if there are no further VSIs in this list.
 */
3361 vsi_handle = f_entry->fltr_info.vsi_handle;
3362 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3365 /* if VSI count goes to zero after updating the VSI list */
3366 if (list_elem->vsi_count == 0)
3371 /* Remove the lookup rule */
3372 struct ice_sw_rule_lkup_rx_tx *s_rule;
3374 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3375 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3382 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3383 ice_aqc_opc_remove_sw_rules);
3385 status = ice_aq_sw_rules(hw, s_rule,
3386 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3387 1, ice_aqc_opc_remove_sw_rules, NULL);
3389 /* Remove a book keeping from the list */
3390 devm_kfree(ice_hw_to_dev(hw), s_rule);
3395 list_del(&list_elem->list_entry);
3396 devm_kfree(ice_hw_to_dev(hw), list_elem);
3399 mutex_unlock(rule_lock);
3404 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3405 * @hw: pointer to the hardware structure
3406 * @mac: MAC address to be checked (for MAC filter)
3407 * @vsi_handle: check MAC filter for this VSI
3409 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3411 struct ice_fltr_mgmt_list_entry *entry;
3412 struct list_head *rule_head;
3413 struct ice_switch_info *sw;
3414 struct mutex *rule_lock; /* Lock to protect filter rule list */
3417 if (!ice_is_vsi_valid(hw, vsi_handle))
3420 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3421 sw = hw->switch_info;
3422 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3426 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3427 mutex_lock(rule_lock);
3428 list_for_each_entry(entry, rule_head, list_entry) {
3429 struct ice_fltr_info *f_info = &entry->fltr_info;
3430 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3432 if (is_zero_ether_addr(mac_addr))
/* Only Tx, VSI-sourced, MAC-lookup FWD_TO_VSI rules targeting this
 * HW VSI count as "this VSI's MAC filter".
 */
3435 if (f_info->flag != ICE_FLTR_TX ||
3436 f_info->src_id != ICE_SRC_ID_VSI ||
3437 f_info->lkup_type != ICE_SW_LKUP_MAC ||
3438 f_info->fltr_act != ICE_FWD_TO_VSI ||
3439 hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3442 if (ether_addr_equal(mac, mac_addr)) {
3443 mutex_unlock(rule_lock);
3447 mutex_unlock(rule_lock);
3452 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3453 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID to be checked (for VLAN filter)
3455 * @vsi_handle: check MAC filter for this VSI
3457 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3459 struct ice_fltr_mgmt_list_entry *entry;
3460 struct list_head *rule_head;
3461 struct ice_switch_info *sw;
3462 struct mutex *rule_lock; /* Lock to protect filter rule list */
3465 if (vlan_id > ICE_MAX_VLAN_ID)
3468 if (!ice_is_vsi_valid(hw, vsi_handle))
3471 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3472 sw = hw->switch_info;
3473 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3477 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3478 mutex_lock(rule_lock);
3479 list_for_each_entry(entry, rule_head, list_entry) {
3480 struct ice_fltr_info *f_info = &entry->fltr_info;
3481 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3482 struct ice_vsi_list_map_info *map_info;
3484 if (entry_vlan_id > ICE_MAX_VLAN_ID)
3487 if (f_info->flag != ICE_FLTR_TX ||
3488 f_info->src_id != ICE_SRC_ID_VSI ||
3489 f_info->lkup_type != ICE_SW_LKUP_VLAN)
3492 /* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3493 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3494 f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3497 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3498 if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3500 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3501 /* If filter_action is FWD_TO_VSI_LIST, make sure
3502 * that VSI being checked is part of VSI list
 */
3504 if (entry->vsi_count == 1 &&
3505 entry->vsi_list_info) {
3506 map_info = entry->vsi_list_info;
3507 if (!test_bit(vsi_handle, map_info->vsi_map))
3512 if (vlan_id == entry_vlan_id) {
3513 mutex_unlock(rule_lock);
3517 mutex_unlock(rule_lock);
3523 * ice_add_mac - Add a MAC address based filter rule
3524 * @hw: pointer to the hardware structure
3525 * @m_list: list of MAC addresses and forwarding information
3527 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3529 struct ice_fltr_list_entry *m_list_itr;
3535 list_for_each_entry(m_list_itr, m_list, list_entry) {
3536 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3540 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3541 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3542 if (!ice_is_vsi_valid(hw, vsi_handle))
3544 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3545 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3546 /* update the src in case it is VSI num */
3547 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3549 m_list_itr->fltr_info.src = hw_vsi_id;
/* Only MAC lookup with a non-zero address is valid here. */
3550 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3551 is_zero_ether_addr(add))
3554 m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3556 if (m_list_itr->status)
/* Stop at the first failing entry; its status is both stored
 * on the entry and returned to the caller.
 */
3557 return m_list_itr->status;
3564 * ice_add_vlan_internal - Add one VLAN based filter rule
3565 * @hw: pointer to the hardware structure
3566 * @f_entry: filter entry containing one VLAN information
3569 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3571 struct ice_switch_info *sw = hw->switch_info;
3572 struct ice_fltr_mgmt_list_entry *v_list_itr;
3573 struct ice_fltr_info *new_fltr, *cur_fltr;
3574 enum ice_sw_lkup_type lkup_type;
3575 u16 vsi_list_id = 0, vsi_handle;
3576 struct mutex *rule_lock; /* Lock to protect filter rule list */
3579 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3582 f_entry->fltr_info.fwd_id.hw_vsi_id =
3583 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3584 new_fltr = &f_entry->fltr_info;
3586 /* VLAN ID should only be 12 bits */
3587 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3590 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3593 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3594 lkup_type = new_fltr->lkup_type;
3595 vsi_handle = new_fltr->vsi_handle;
3596 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3597 mutex_lock(rule_lock);
/* look for an existing rule for this VLAN; branches below cover
 * "no rule yet", "rule with single-ref VSI list", and "rule whose
 * VSI list is shared" in turn
 */
3598 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3600 struct ice_vsi_list_map_info *map_info = NULL;
3602 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3603 /* All VLAN pruning rules use a VSI list. Check if
3604 * there is already a VSI list containing VSI that we
3605 * want to add. If found, use the same vsi_list_id for
3606 * this new VLAN rule or else create a new list.
3608 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3612 status = ice_create_vsi_list_rule(hw,
3620 /* Convert the action to forwarding to a VSI list. */
3621 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3622 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3625 status = ice_create_pkt_fwd_rule(hw, f_entry);
3627 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3633 /* reuse VSI list for new rule and increment ref_cnt */
3635 v_list_itr->vsi_list_info = map_info;
3636 map_info->ref_cnt++;
3638 v_list_itr->vsi_list_info =
3639 ice_create_vsi_list_map(hw, &vsi_handle,
3643 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3644 /* Update existing VSI list to add new VSI ID only if it used
3647 cur_fltr = &v_list_itr->fltr_info;
3648 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3651 /* If VLAN rule exists and VSI list being used by this rule is
3652 * referenced by more than 1 VLAN rule. Then create a new VSI
3653 * list appending previous VSI with new VSI and update existing
3654 * VLAN rule to point to new VSI list ID
3656 struct ice_fltr_info tmp_fltr;
3657 u16 vsi_handle_arr[2];
3660 /* Current implementation only supports reusing VSI list with
3661 * one VSI count. We should never hit below condition
3663 if (v_list_itr->vsi_count > 1 &&
3664 v_list_itr->vsi_list_info->ref_cnt > 1) {
3665 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
/* single existing member of the shared list (vsi_count is 1 here) */
3671 find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3674 /* A rule already exists with the new VSI being added */
3675 if (cur_handle == vsi_handle) {
3680 vsi_handle_arr[0] = cur_handle;
3681 vsi_handle_arr[1] = vsi_handle;
3682 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3683 &vsi_list_id, lkup_type);
3687 tmp_fltr = v_list_itr->fltr_info;
3688 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3689 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3690 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3691 /* Update the previous switch rule to a new VSI list which
3692 * includes current VSI that is requested
3694 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3698 /* before overriding VSI list map info. decrement ref_cnt of
3701 v_list_itr->vsi_list_info->ref_cnt--;
3703 /* now update to newly created list */
3704 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3705 v_list_itr->vsi_list_info =
3706 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3708 v_list_itr->vsi_count++;
3712 mutex_unlock(rule_lock);
3717 * ice_add_vlan - Add VLAN based filter rule
3718 * @hw: pointer to the hardware structure
3719 * @v_list: list of VLAN entries and forwarding information
3721 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3723 struct ice_fltr_list_entry *v_list_itr;
3728 list_for_each_entry(v_list_itr, v_list, list_entry) {
/* only ICE_SW_LKUP_VLAN entries are accepted on this path */
3729 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3731 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3732 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
/* stop on first failure; earlier entries remain programmed */
3733 if (v_list_itr->status)
3734 return v_list_itr->status;
3740 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3741 * @hw: pointer to the hardware structure
3742 * @em_list: list of ether type MAC filter, MAC is optional
3744 * This function requires the caller to populate the entries in
3745 * the filter list with the necessary fields (including flags to
3746 * indicate Tx or Rx rules).
3748 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3750 struct ice_fltr_list_entry *em_list_itr;
3752 if (!em_list || !hw)
3755 list_for_each_entry(em_list_itr, em_list, list_entry) {
3756 enum ice_sw_lkup_type l_type =
3757 em_list_itr->fltr_info.lkup_type;
/* only the two ethertype lookup types are valid here */
3759 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3760 l_type != ICE_SW_LKUP_ETHERTYPE)
3763 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3765 if (em_list_itr->status)
3766 return em_list_itr->status;
3772 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3773 * @hw: pointer to the hardware structure
3774 * @em_list: list of ethertype or ethertype MAC entries
3776 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3778 struct ice_fltr_list_entry *em_list_itr, *tmp;
3780 if (!em_list || !hw)
/* _safe variant: entries may be unlinked while walking */
3783 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3784 enum ice_sw_lkup_type l_type =
3785 em_list_itr->fltr_info.lkup_type;
3787 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3788 l_type != ICE_SW_LKUP_ETHERTYPE)
3791 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3793 if (em_list_itr->status)
3794 return em_list_itr->status;
3800 * ice_rem_sw_rule_info
3801 * @hw: pointer to the hardware structure
3802 * @rule_head: pointer to the switch list structure that we want to delete
3805 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3807 if (!list_empty(rule_head)) {
3808 struct ice_fltr_mgmt_list_entry *entry;
3809 struct ice_fltr_mgmt_list_entry *tmp;
/* unlink and free every management entry; devm-allocated memory
 * is returned explicitly rather than waiting for device teardown
 */
3811 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3812 list_del(&entry->list_entry);
3813 devm_kfree(ice_hw_to_dev(hw), entry);
3819 * ice_rem_adv_rule_info
3820 * @hw: pointer to the hardware structure
3821 * @rule_head: pointer to the switch list structure that we want to delete
3824 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3826 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3827 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3829 if (list_empty(rule_head))
3832 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3833 list_del(&lst_itr->list_entry);
/* each advanced entry owns a separately allocated lkups array */
3834 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3835 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3840 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3841 * @pi: pointer to the port_info structure
3842 * @vsi_handle: VSI handle to set as default
3843 * @set: true to add the above mentioned switch rule, false to remove it
3844 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3846 * add filter rule to set/unset given VSI as default VSI for the switch
3847 * (represented by swid)
3850 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3853 struct ice_fltr_list_entry f_list_entry;
3854 struct ice_fltr_info f_info;
3855 struct ice_hw *hw = pi->hw;
3859 if (!ice_is_vsi_valid(hw, vsi_handle))
3862 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3864 memset(&f_info, 0, sizeof(f_info));
3866 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3867 f_info.flag = direction;
3868 f_info.fltr_act = ICE_FWD_TO_VSI;
3869 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3870 f_info.vsi_handle = vsi_handle;
/* source depends on direction: lport for Rx, the VSI itself for Tx */
3872 if (f_info.flag & ICE_FLTR_RX) {
3873 f_info.src = hw->port_info->lport;
3874 f_info.src_id = ICE_SRC_ID_LPORT;
3875 } else if (f_info.flag & ICE_FLTR_TX) {
3876 f_info.src_id = ICE_SRC_ID_VSI;
3877 f_info.src = hw_vsi_id;
3879 f_list_entry.fltr_info = f_info;
/* @set selects between installing and removing the DFLT rule */
3882 status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3885 status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3892 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3893 * @fm_entry: filter entry to inspect
3894 * @vsi_handle: VSI handle to compare with filter info
3897 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* true when the rule forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap includes this VSI handle
 */
3899 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3900 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3901 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3902 fm_entry->vsi_list_info &&
3903 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3907 * ice_check_if_dflt_vsi - check if VSI is default VSI
3908 * @pi: pointer to the port_info structure
3909 * @vsi_handle: vsi handle to check for in filter list
3910 * @rule_exists: indicates if there are any VSI's in the rule list
3912 * checks if the VSI is in a default VSI list, and also indicates
3913 * if the default VSI list is empty
3916 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3919 struct ice_fltr_mgmt_list_entry *fm_entry;
3920 struct ice_sw_recipe *recp_list;
3921 struct list_head *rule_head;
3922 struct mutex *rule_lock; /* Lock to protect filter rule list */
3925 recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3926 rule_lock = &recp_list->filt_rule_lock;
3927 rule_head = &recp_list->filt_rules;
3929 mutex_lock(rule_lock);
/* rule_exists is optional output; only set when caller passed it */
3931 if (rule_exists && !list_empty(rule_head))
3932 *rule_exists = true;
3934 list_for_each_entry(fm_entry, rule_head, list_entry) {
3935 if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3941 mutex_unlock(rule_lock);
3947 * ice_remove_mac - remove a MAC address based filter rule
3948 * @hw: pointer to the hardware structure
3949 * @m_list: list of MAC addresses and forwarding information
3951 * This function removes either a MAC filter rule or a specific VSI from a
3952 * VSI list for a multicast MAC address.
3954 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3955 * be aware that this call will only work if all the entries passed into m_list
3956 * were added previously. It will not attempt to do a partial remove of entries
3959 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3961 struct ice_fltr_list_entry *list_itr, *tmp;
3966 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3967 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3970 if (l_type != ICE_SW_LKUP_MAC)
3973 vsi_handle = list_itr->fltr_info.vsi_handle;
3974 if (!ice_is_vsi_valid(hw, vsi_handle))
/* refresh hw VSI number before handing off to the remove helper */
3977 list_itr->fltr_info.fwd_id.hw_vsi_id =
3978 ice_get_hw_vsi_num(hw, vsi_handle);
3980 list_itr->status = ice_remove_rule_internal(hw,
3983 if (list_itr->status)
3984 return list_itr->status;
3990 * ice_remove_vlan - Remove VLAN based filter rule
3991 * @hw: pointer to the hardware structure
3992 * @v_list: list of VLAN entries and forwarding information
3994 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3996 struct ice_fltr_list_entry *v_list_itr, *tmp;
4001 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4002 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4004 if (l_type != ICE_SW_LKUP_VLAN)
4006 v_list_itr->status = ice_remove_rule_internal(hw,
/* propagate first per-entry failure to the caller */
4009 if (v_list_itr->status)
4010 return v_list_itr->status;
4016 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4017 * @hw: pointer to the hardware structure
4018 * @vsi_handle: VSI handle to remove filters from
4019 * @vsi_list_head: pointer to the list to add entry to
4020 * @fi: pointer to fltr_info of filter entry to copy & add
4022 * Helper function, used when creating a list of filters to remove from
4023 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4024 * original filter entry, with the exception of fltr_info.fltr_act and
4025 * fltr_info.fwd_id fields. These are set such that later logic can
4026 * extract which VSI to remove the fltr from, and pass on that information.
4029 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4030 struct list_head *vsi_list_head,
4031 struct ice_fltr_info *fi)
4033 struct ice_fltr_list_entry *tmp;
4035 /* this memory is freed up in the caller function
4036 * once filters for this VSI are removed
4038 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4042 tmp->fltr_info = *fi;
4044 /* Overwrite these fields to indicate which VSI to remove filter from,
4045 * so find and remove logic can extract the information from the
4046 * list entries. Note that original entries will still have proper
4049 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4050 tmp->fltr_info.vsi_handle = vsi_handle;
4051 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4053 list_add(&tmp->list_entry, vsi_list_head);
4059 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4060 * @hw: pointer to the hardware structure
4061 * @vsi_handle: VSI handle to remove filters from
4062 * @lkup_list_head: pointer to the list that has certain lookup type filters
4063 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4065 * Locates all filters in lkup_list_head that are used by the given VSI,
4066 * and adds COPIES of those entries to vsi_list_head (intended to be used
4067 * to remove the listed filters).
4068 * Note that this means all entries in vsi_list_head must be explicitly
4069 * deallocated by the caller when done with list.
4072 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4073 struct list_head *lkup_list_head,
4074 struct list_head *vsi_list_head)
4076 struct ice_fltr_mgmt_list_entry *fm_entry;
4079 /* check to make sure VSI ID is valid and within boundary */
4080 if (!ice_is_vsi_valid(hw, vsi_handle))
4083 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
/* skip rules not referencing this VSI */
4084 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4087 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4089 &fm_entry->fltr_info);
4097 * ice_determine_promisc_mask
4098 * @fi: filter info to parse
4100 * Helper function to determine which ICE_PROMISC_ mask corresponds
4101 * to given filter into.
4103 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4105 u16 vid = fi->l_data.mac_vlan.vlan_id;
4106 u8 *macaddr = fi->l_data.mac.mac_addr;
4107 bool is_tx_fltr = false;
4108 u8 promisc_mask = 0;
/* direction from the filter flag selects the _TX vs _RX mask bits */
4110 if (fi->flag == ICE_FLTR_TX)
/* classify the DA: broadcast, multicast, or unicast */
4113 if (is_broadcast_ether_addr(macaddr))
4114 promisc_mask |= is_tx_fltr ?
4115 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4116 else if (is_multicast_ether_addr(macaddr))
4117 promisc_mask |= is_tx_fltr ?
4118 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4119 else if (is_unicast_ether_addr(macaddr))
4120 promisc_mask |= is_tx_fltr ?
4121 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4123 promisc_mask |= is_tx_fltr ?
4124 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4126 return promisc_mask;
4130 * ice_remove_promisc - Remove promisc based filter rules
4131 * @hw: pointer to the hardware structure
4132 * @recp_id: recipe ID for which the rule needs to removed
4133 * @v_list: list of promisc entries
4136 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4138 struct ice_fltr_list_entry *v_list_itr, *tmp;
4140 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4141 v_list_itr->status =
4142 ice_remove_rule_internal(hw, recp_id, v_list_itr);
/* stop on first failure, as in the other remove helpers */
4143 if (v_list_itr->status)
4144 return v_list_itr->status;
4150 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4151 * @hw: pointer to the hardware structure
4152 * @vsi_handle: VSI handle to clear mode
4153 * @promisc_mask: mask of promiscuous config bits to clear
4154 * @vid: VLAN ID to clear VLAN promiscuous
4157 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4160 struct ice_switch_info *sw = hw->switch_info;
4161 struct ice_fltr_list_entry *fm_entry, *tmp;
4162 struct list_head remove_list_head;
4163 struct ice_fltr_mgmt_list_entry *itr;
4164 struct list_head *rule_head;
4165 struct mutex *rule_lock; /* Lock to protect filter rule list */
4169 if (!ice_is_vsi_valid(hw, vsi_handle))
/* VLAN promisc bits live under a different recipe than plain promisc */
4172 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4173 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4175 recipe_id = ICE_SW_LKUP_PROMISC;
4177 rule_head = &sw->recp_list[recipe_id].filt_rules;
4178 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4180 INIT_LIST_HEAD(&remove_list_head);
/* phase 1 (under lock): collect matching rules into a private list */
4182 mutex_lock(rule_lock);
4183 list_for_each_entry(itr, rule_head, list_entry) {
4184 struct ice_fltr_info *fltr_info;
4185 u8 fltr_promisc_mask = 0;
4187 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4189 fltr_info = &itr->fltr_info;
4191 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4192 vid != fltr_info->l_data.mac_vlan.vlan_id)
4195 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4197 /* Skip if filter is not completely specified by given mask */
4198 if (fltr_promisc_mask & ~promisc_mask)
4201 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4205 mutex_unlock(rule_lock);
4206 goto free_fltr_list;
4209 mutex_unlock(rule_lock);
/* phase 2 (lock dropped): remove the collected rules, then free copies */
4211 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4214 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4215 list_del(&fm_entry->list_entry);
4216 devm_kfree(ice_hw_to_dev(hw), fm_entry);
4223 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4224 * @hw: pointer to the hardware structure
4225 * @vsi_handle: VSI handle to configure
4226 * @promisc_mask: mask of promiscuous config bits
4227 * @vid: VLAN ID to set VLAN promiscuous
4230 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4232 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4233 struct ice_fltr_list_entry f_list_entry;
4234 struct ice_fltr_info new_fltr;
4241 if (!ice_is_vsi_valid(hw, vsi_handle))
4243 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4245 memset(&new_fltr, 0, sizeof(new_fltr));
/* VLAN-flavored promisc uses its own recipe and carries the VID */
4247 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4248 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4249 new_fltr.l_data.mac_vlan.vlan_id = vid;
4250 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4252 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4253 recipe_id = ICE_SW_LKUP_PROMISC;
4256 /* Separate filters must be set for each direction/packet type
4257 * combination, so we will loop over the mask value, store the
4258 * individual type, and clear it out in the input mask as it
4261 while (promisc_mask) {
/* peel one direction/type bit off the mask per iteration */
4267 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4268 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4269 pkt_type = UCAST_FLTR;
4270 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4271 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4272 pkt_type = UCAST_FLTR;
4274 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4275 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4276 pkt_type = MCAST_FLTR;
4277 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4278 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4279 pkt_type = MCAST_FLTR;
4281 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4282 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4283 pkt_type = BCAST_FLTR;
4284 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4285 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4286 pkt_type = BCAST_FLTR;
4290 /* Check for VLAN promiscuous flag */
4291 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4292 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4293 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4294 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4298 /* Set filter DA based on packet type */
4299 mac_addr = new_fltr.l_data.mac.mac_addr;
4300 if (pkt_type == BCAST_FLTR) {
4301 eth_broadcast_addr(mac_addr);
4302 } else if (pkt_type == MCAST_FLTR ||
4303 pkt_type == UCAST_FLTR) {
4304 /* Use the dummy ether header DA */
4305 ether_addr_copy(mac_addr, dummy_eth_header);
4306 if (pkt_type == MCAST_FLTR)
4307 mac_addr[0] |= 0x1; /* Set multicast bit */
4310 /* Need to reset this to zero for all iterations */
4313 new_fltr.flag |= ICE_FLTR_TX;
4314 new_fltr.src = hw_vsi_id;
4316 new_fltr.flag |= ICE_FLTR_RX;
4317 new_fltr.src = hw->port_info->lport;
4320 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4321 new_fltr.vsi_handle = vsi_handle;
4322 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
/* f_list_entry is rebuilt and re-submitted on every iteration */
4323 f_list_entry.fltr_info = new_fltr;
4325 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4327 goto set_promisc_exit;
4335 * ice_set_vlan_vsi_promisc
4336 * @hw: pointer to the hardware structure
4337 * @vsi_handle: VSI handle to configure
4338 * @promisc_mask: mask of promiscuous config bits
4339 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4341 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4344 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4345 bool rm_vlan_promisc)
4347 struct ice_switch_info *sw = hw->switch_info;
4348 struct ice_fltr_list_entry *list_itr, *tmp;
4349 struct list_head vsi_list_head;
4350 struct list_head *vlan_head;
4351 struct mutex *vlan_lock; /* Lock to protect filter rule list */
4355 INIT_LIST_HEAD(&vsi_list_head);
4356 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4357 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
/* snapshot the VSI's VLAN rules under lock, then operate lock-free */
4358 mutex_lock(vlan_lock);
4359 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4361 mutex_unlock(vlan_lock);
4363 goto free_fltr_list;
4365 list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4366 /* Avoid enabling or disabling VLAN zero twice when in double
4369 if (ice_is_dvm_ena(hw) &&
4370 list_itr->fltr_info.l_data.vlan.tpid == 0)
4373 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4374 if (rm_vlan_promisc)
4375 status = ice_clear_vsi_promisc(hw, vsi_handle,
4376 promisc_mask, vlan_id);
4378 status = ice_set_vsi_promisc(hw, vsi_handle,
4379 promisc_mask, vlan_id);
/* -EEXIST is tolerated: the mode is already in the desired state */
4380 if (status && status != -EEXIST)
/* free the COPIES made by ice_add_to_vsi_fltr_list */
4385 list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4386 list_del(&list_itr->list_entry);
4387 devm_kfree(ice_hw_to_dev(hw), list_itr);
4393 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4394 * @hw: pointer to the hardware structure
4395 * @vsi_handle: VSI handle to remove filters from
4396 * @lkup: switch rule filter lookup type
4399 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4400 enum ice_sw_lkup_type lkup)
4402 struct ice_switch_info *sw = hw->switch_info;
4403 struct ice_fltr_list_entry *fm_entry;
4404 struct list_head remove_list_head;
4405 struct list_head *rule_head;
4406 struct ice_fltr_list_entry *tmp;
4407 struct mutex *rule_lock; /* Lock to protect filter rule list */
4410 INIT_LIST_HEAD(&remove_list_head);
4411 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4412 rule_head = &sw->recp_list[lkup].filt_rules;
/* gather this VSI's rules of the given lookup type under lock */
4413 mutex_lock(rule_lock);
4414 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4416 mutex_unlock(rule_lock);
4418 goto free_fltr_list;
/* dispatch to the type-specific remove helper */
4421 case ICE_SW_LKUP_MAC:
4422 ice_remove_mac(hw, &remove_list_head);
4424 case ICE_SW_LKUP_VLAN:
4425 ice_remove_vlan(hw, &remove_list_head);
4427 case ICE_SW_LKUP_PROMISC:
4428 case ICE_SW_LKUP_PROMISC_VLAN:
4429 ice_remove_promisc(hw, lkup, &remove_list_head);
4431 case ICE_SW_LKUP_MAC_VLAN:
4432 case ICE_SW_LKUP_ETHERTYPE:
4433 case ICE_SW_LKUP_ETHERTYPE_MAC:
4434 case ICE_SW_LKUP_DFLT:
4435 case ICE_SW_LKUP_LAST:
4437 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4442 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4443 list_del(&fm_entry->list_entry);
4444 devm_kfree(ice_hw_to_dev(hw), fm_entry);
4449 * ice_remove_vsi_fltr - Remove all filters for a VSI
4450 * @hw: pointer to the hardware structure
4451 * @vsi_handle: VSI handle to remove filters from
4453 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
/* sweep every lookup type the VSI may have rules under */
4455 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4456 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4457 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4458 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4459 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4460 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4461 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4462 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4466 * ice_alloc_res_cntr - allocating resource counter
4467 * @hw: pointer to the hardware structure
4468 * @type: type of resource
4469 * @alloc_shared: if set it is shared else dedicated
4470 * @num_items: number of entries requested for FD resource type
4471 * @counter_id: counter index returned by AQ call
4474 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4477 struct ice_aqc_alloc_free_res_elem *buf;
4481 /* Allocate resource */
4482 buf_len = struct_size(buf, elem, 1);
4483 buf = kzalloc(buf_len, GFP_KERNEL);
4487 buf->num_elems = cpu_to_le16(num_items);
4488 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4489 ICE_AQC_RES_TYPE_M) | alloc_shared);
4491 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4492 ice_aqc_opc_alloc_res, NULL);
/* firmware returns the allocated counter index in the first element */
4496 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4504 * ice_free_res_cntr - free resource counter
4505 * @hw: pointer to the hardware structure
4506 * @type: type of resource
4507 * @alloc_shared: if set it is shared else dedicated
4508 * @num_items: number of entries to be freed for FD resource type
4509 * @counter_id: counter ID resource which needs to be freed
4512 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4515 struct ice_aqc_alloc_free_res_elem *buf;
4520 buf_len = struct_size(buf, elem, 1);
4521 buf = kzalloc(buf_len, GFP_KERNEL);
4525 buf->num_elems = cpu_to_le16(num_items);
4526 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4527 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* unlike alloc, the counter to release is passed in sw_resp */
4528 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4530 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4531 ice_aqc_opc_free_res, NULL);
4533 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
/* Builds one ice_prot_ext table entry: protocol id plus its per-word
 * byte offsets (variadic) — see the table comment below ice_share_res.
 */
4539 #define ICE_PROTOCOL_ENTRY(id, ...) { \
4541 .offs = {__VA_ARGS__}, \
4545 * ice_share_res - set a resource as shared or dedicated
4546 * @hw: hw struct of original owner of resource
4547 * @type: resource type
4548 * @shared: is the resource being set to shared
4549 * @res_id: resource id (descriptor)
4551 int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
4553 struct ice_aqc_alloc_free_res_elem *buf;
4557 buf_len = struct_size(buf, elem, 1);
4558 buf = kzalloc(buf_len, GFP_KERNEL);
4562 buf->num_elems = cpu_to_le16(1);
/* @shared chooses whether the SHARED flag is set or masked off */
4564 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4565 ICE_AQC_RES_TYPE_M) |
4566 ICE_AQC_RES_TYPE_FLAG_SHARED);
4568 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4569 ICE_AQC_RES_TYPE_M) &
4570 ~ICE_AQC_RES_TYPE_FLAG_SHARED);
4572 buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
4573 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4574 ice_aqc_opc_share_res, NULL);
4576 ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
4577 type, res_id, shared ? "SHARED" : "DEDICATED");
4583 /* This is mapping table entry that maps every word within a given protocol
4584 * structure to the real byte offset as per the specification of that
4586 * for example dst address is 3 words in ethertype header and corresponding
4587 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4588 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4589 * matching entry describing its field. This needs to be updated if new
4590 * structure is added to that union.
4592 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4593 ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
4594 ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
4595 ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
4596 ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
4597 ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
4598 ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
4599 ICE_PROTOCOL_ENTRY(ICE_IPV4_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
4600 ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
4601 20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
4602 ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
4603 22, 24, 26, 28, 30, 32, 34, 36, 38),
4604 ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
4605 ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
4606 ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
4607 ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
4608 ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
4609 ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
4610 ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
4611 ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
4612 ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
4613 ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
/* VLAN entries list TCI word (offset 2) before TPID word (offset 0) */
4614 ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
4615 ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
4616 ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
4617 ICE_SOURCE_PORT_MDID_OFFSET,
4618 ICE_PTYPE_MDID_OFFSET,
4619 ICE_PACKET_LENGTH_MDID_OFFSET,
4620 ICE_SOURCE_VSI_MDID_OFFSET,
4621 ICE_PKT_VLAN_MDID_OFFSET,
4622 ICE_PKT_TUNNEL_MDID_OFFSET,
4623 ICE_PKT_TCP_MDID_OFFSET,
4624 ICE_PKT_ERROR_MDID_OFFSET),
/* Software protocol type -> hardware protocol ID mapping. Non-const on
 * purpose: ice_change_proto_id_to_dvm() patches the ICE_VLAN_OFOS entry
 * at runtime when double VLAN mode is enabled.
 */
4627 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4628 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4629 { ICE_MAC_IL, ICE_MAC_IL_HW },
4630 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4631 { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
4632 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4633 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4634 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4635 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4636 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4637 { ICE_TCP_IL, ICE_TCP_IL_HW },
4638 { ICE_UDP_OF, ICE_UDP_OF_HW },
4639 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4640 { ICE_VXLAN, ICE_UDP_OF_HW },
4641 { ICE_GENEVE, ICE_UDP_OF_HW },
4642 { ICE_NVGRE, ICE_GRE_OF_HW },
4643 { ICE_GTP, ICE_UDP_OF_HW },
4644 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
4645 { ICE_PPPOE, ICE_PPPOE_HW },
4646 { ICE_L2TPV3, ICE_L2TPV3_HW },
4647 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
4648 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
4649 { ICE_HW_METADATA, ICE_META_DATA_ID_HW },
4653 * ice_find_recp - find a recipe
4654 * @hw: pointer to the hardware structure
4655 * @lkup_exts: extension sequence to match
4656 * @rinfo: information regarding the rule e.g. priority and action info
4658 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4661 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4662 const struct ice_adv_rule_info *rinfo)
4664 bool refresh_required = true;
4665 struct ice_sw_recipe *recp;
4668 /* Walk through existing recipes to find a match */
4669 recp = hw->switch_info->recp_list;
4670 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4671 /* If recipe was not created for this ID, in SW bookkeeping,
4672 * check if FW has an entry for this recipe. If the FW has an
4673 * entry update it in our SW bookkeeping and continue with the
4676 if (!recp[i].recp_created)
4677 if (ice_get_recp_frm_fw(hw,
4678 hw->switch_info->recp_list, i,
4682 /* Skip inverse action recipes */
4683 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4684 ICE_AQ_RECIPE_ACT_INV_ACT)
4687 /* if number of words we are looking for match */
4688 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4689 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4690 struct ice_fv_word *be = lkup_exts->fv_words;
4691 u16 *cr = recp[i].lkup_exts.field_mask;
4692 u16 *de = lkup_exts->field_mask;
4696 /* ar, cr, and qr are related to the recipe words, while
4697 * be, de, and pe are related to the lookup words
4699 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
/* match each lookup word against any recipe word (order-independent) */
4700 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4702 if (ar[qr].off == be[pe].off &&
4703 ar[qr].prot_id == be[pe].prot_id &&
4705 /* Found the "pe"th word in the
4710 /* After walking through all the words in the
4711 * "i"th recipe if "p"th word was not found then
4712 * this recipe is not what we are looking for.
4713 * So break out from this loop and try the next
4716 if (qr >= recp[i].lkup_exts.n_val_words) {
4721 /* If for "i"th recipe the found was never set to false
4722 * then it means we found our match
4723 * Also tun type and *_pass_l2 of recipe needs to be
4726 if (found && recp[i].tun_type == rinfo->tun_type &&
4727 recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
4728 recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
4729 return i; /* Return the recipe ID */
4732 return ICE_MAX_NUM_RECIPES;
4736 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4738 * As protocol id for outer vlan is different in dvm and svm, if dvm is
4739 * supported protocol array record for outer vlan has to be modified to
4740 * reflect the value proper for DVM.
4742 void ice_change_proto_id_to_dvm(void)
4746 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4747 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4748 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4749 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4753 * ice_prot_type_to_id - get protocol ID from protocol type
4754 * @type: protocol type
4755 * @id: pointer to variable that will receive the ID
4757 * Returns true if found, false otherwise
4759 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4763 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4764 if (ice_prot_id_tbl[i].type == type) {
4765 *id = ice_prot_id_tbl[i].protocol_id;
4772 * ice_fill_valid_words - count valid words
4773 * @rule: advanced rule with lookup information
4774 * @lkup_exts: byte offset extractions of the words that are valid
4776 * calculate valid words in a lookup rule using mask value
/* Appends one fv_word (protocol ID + byte offset) and its mask to
 * @lkup_exts for every non-zero 16-bit mask word in @rule->m_u, and returns
 * the number of words added.
 * NOTE(review): the early-return on unknown protocol, the word++ increment
 * and the final return are elided in this view.
 */
4779 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4780 struct ice_prot_lkup_ext *lkup_exts)
4782 u8 j, word, prot_id, ret_val;
/* Unknown protocol types contribute no words. */
4784 if (!ice_prot_type_to_id(rule->type, &prot_id))
/* Continue filling from the words already recorded. */
4787 word = lkup_exts->n_val_words;
/* Walk the mask union 16 bits at a time; only masked words matter. */
4789 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4790 if (((u16 *)&rule->m_u)[j] &&
4791 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4792 /* No more space to accommodate */
4793 if (word >= ICE_MAX_CHAIN_WORDS)
4795 lkup_exts->fv_words[word].off =
4796 ice_prot_ext[rule->type].offs[j];
4797 lkup_exts->fv_words[word].prot_id =
4798 ice_prot_id_tbl[rule->type].protocol_id;
4799 lkup_exts->field_mask[word] =
4800 be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
/* Number of words this rule contributed. */
4804 ret_val = word - lkup_exts->n_val_words;
4805 lkup_exts->n_val_words = word;
4811 * ice_create_first_fit_recp_def - Create a recipe grouping
4812 * @hw: pointer to the hardware structure
4813 * @lkup_exts: an array of protocol header extractions
4814 * @rg_list: pointer to a list that stores new recipe groups
4815 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4817 * Using first fit algorithm, take all the words that are still not done
4818 * and start grouping them in 4-word groups. Each group makes up one
/* NOTE(review): the devm_kzalloc failure check, the group-full condition's
 * first clause, and the n_val_pairs increment are elided in this view.
 */
4822 ice_create_first_fit_recp_def(struct ice_hw *hw,
4823 struct ice_prot_lkup_ext *lkup_exts,
4824 struct list_head *rg_list,
4827 struct ice_pref_recipe_group *grp = NULL;
4832 /* Walk through every word in the rule to check if it is not done. If so
4833 * then this word needs to be part of a new recipe.
4835 for (j = 0; j < lkup_exts->n_val_words; j++)
4836 if (!test_bit(j, lkup_exts->done)) {
/* Current group is full (ICE_NUM_WORDS_RECIPE pairs) or does not
 * exist yet: allocate a new group entry and queue it on @rg_list.
 */
4838 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4839 struct ice_recp_grp_entry *entry;
4841 entry = devm_kzalloc(ice_hw_to_dev(hw),
4846 list_add(&entry->l_entry, rg_list);
4847 grp = &entry->r_group;
/* Record this word's protocol/offset pair and mask in the group. */
4851 grp->pairs[grp->n_val_pairs].prot_id =
4852 lkup_exts->fv_words[j].prot_id;
4853 grp->pairs[grp->n_val_pairs].off =
4854 lkup_exts->fv_words[j].off;
4855 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4863 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4864 * @hw: pointer to the hardware structure
4865 * @fv_list: field vector with the extraction sequence information
4866 * @rg_list: recipe groupings with protocol-offset pairs
4868 * Helper function to fill in the field vector indices for protocol-offset
4869 * pairs. These indexes are then ultimately programmed into a recipe.
/* NOTE(review): the "found" bookkeeping, fv_idx assignment and the
 * error return for a missing pair are elided in this view.
 */
4872 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4873 struct list_head *rg_list)
4875 struct ice_sw_fv_list_entry *fv;
4876 struct ice_recp_grp_entry *rg;
4877 struct ice_fv_word *fv_ext;
/* Nothing to index against if no field vectors were collected. */
4879 if (list_empty(fv_list))
/* Pick the first field vector; its extraction words (ew) are searched
 * for every protocol/offset pair of every recipe group.
 */
4882 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4884 fv_ext = fv->fv_ptr->ew;
4886 list_for_each_entry(rg, rg_list, l_entry) {
4889 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4890 struct ice_fv_word *pr;
4895 pr = &rg->r_group.pairs[i];
4896 mask = rg->r_group.mask[i];
/* Linear scan of the extraction sequence for a matching
 * protocol ID + offset; fvw is the per-block word count.
 */
4898 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4899 if (fv_ext[j].prot_id == pr->prot_id &&
4900 fv_ext[j].off == pr->off) {
4903 /* Store index of field vector */
4905 rg->fv_mask[i] = mask;
4909 /* Protocol/offset could not be found, caller gave an
4921 * ice_find_free_recp_res_idx - find free result indexes for recipe
4922 * @hw: pointer to hardware structure
4923 * @profiles: bitmap of profiles that will be associated with the new recipe
4924 * @free_idx: pointer to variable to receive the free index bitmap
4926 * The algorithm used here is:
4927 * 1. When creating a new recipe, create a set P which contains all
4928 * Profiles that will be associated with our new recipe
4930 * 2. For each Profile p in set P:
4931 * a. Add all recipes associated with Profile p into set R
4932 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4933 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4934 * i. Or just assume they all have the same possible indexes:
4936 * i.e., PossibleIndexes = 0x0000F00000000000
4938 * 3. For each Recipe r in set R:
4939 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4940 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4942 * FreeIndexes will contain the bits indicating the indexes free for use,
4943 * then the code needs to update the recipe[r].used_result_idx_bits to
4944 * indicate which indexes were selected for use by this recipe.
/* Returns the count of free result indexes; @free_idx receives the bitmap. */
4947 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4948 unsigned long *free_idx)
4950 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4951 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4952 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
/* Start with "no recipes seen, no indexes used, all indexes possible". */
4955 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4956 bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4958 bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4960 /* For each profile we are going to associate the recipe with, add the
4961 * recipes that are associated with that profile. This will give us
4962 * the set of recipes that our recipe may collide with. Also, determine
4963 * what possible result indexes are usable given this set of profiles.
4965 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4966 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4967 ICE_MAX_NUM_RECIPES);
4968 bitmap_and(possible_idx, possible_idx,
4969 hw->switch_info->prof_res_bm[bit],
4973 /* For each recipe that our new recipe may collide with, determine
4974 * which indexes have been used.
4976 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4977 bitmap_or(used_idx, used_idx,
4978 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here). */
4981 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4983 /* return number of free indexes */
4984 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4988 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4989 * @hw: pointer to hardware structure
4990 * @rm: recipe management list entry
4991 * @profiles: bitmap of profiles that will be associated.
/* Allocates recipe IDs, fills Admin Queue recipe buffers from the recipe
 * groups in @rm->rg_list, chains multi-group recipes through result
 * indexes, programs them via ice_aq_add_recipe() and mirrors the outcome
 * into SW bookkeeping (hw->switch_info->recp_list).
 * NOTE(review): many lines (local declarations, error-path gotos, loop
 * closers, free paths) are elided in this view; comments cover visible code.
 */
4994 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4995 unsigned long *profiles)
4997 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4998 struct ice_aqc_recipe_content *content;
4999 struct ice_aqc_recipe_data_elem *tmp;
5000 struct ice_aqc_recipe_data_elem *buf;
5001 struct ice_recp_grp_entry *entry;
5008 /* When more than one recipe are required, another recipe is needed to
5009 * chain them together. Matching a tunnel metadata ID takes up one of
5010 * the match fields in the chaining recipe reducing the number of
5011 * chained recipes by one.
5013 /* check number of free result indices */
5014 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
5015 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5017 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5018 free_res_idx, rm->n_grp_count);
/* A chained (multi-group) recipe needs one result index per sub-recipe
 * and must fit within the chaining limits.
 */
5020 if (rm->n_grp_count > 1) {
5021 if (rm->n_grp_count > free_res_idx)
5027 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5030 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
5034 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
/* Read back existing recipes from FW so new entries start from a
 * known template (tmp[0]).
 */
5041 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5042 recipe_count = ICE_MAX_NUM_RECIPES;
5043 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5045 if (status || recipe_count == 0)
5048 /* Allocate the recipe resources, and configure them according to the
5049 * match fields from protocol headers and extracted field vectors.
5051 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5052 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5055 status = ice_alloc_recipe(hw, &entry->rid);
5059 content = &buf[recps].content;
5061 /* Clear the result index of the located recipe, as this will be
5062 * updated, if needed, later in the recipe creation process.
5064 tmp[0].content.result_indx = 0;
5066 buf[recps] = tmp[0];
5067 buf[recps].recipe_indx = (u8)entry->rid;
5068 /* if the recipe is a non-root recipe RID should be programmed
5069 * as 0 for the rules to be applied correctly.
5072 memset(&content->lkup_indx, 0,
5073 sizeof(content->lkup_indx));
5075 /* All recipes use look-up index 0 to match switch ID. */
5076 content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5077 content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5078 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5081 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5082 content->lkup_indx[i] = 0x80;
5083 content->mask[i] = 0;
/* Fill in the actual field-vector indexes/masks for this group. */
5086 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5087 content->lkup_indx[i + 1] = entry->fv_idx[i];
5088 content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
/* Chained recipe: publish this sub-recipe's match outcome through a
 * result index the root recipe will consume.
 */
5091 if (rm->n_grp_count > 1) {
5092 /* Checks to see if there really is a valid result index
5095 if (chain_idx >= ICE_MAX_FV_WORDS) {
5096 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
5101 entry->chain_idx = chain_idx;
5102 content->result_indx =
5103 ICE_AQ_RECIPE_RESULT_EN |
5104 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5105 ICE_AQ_RECIPE_RESULT_DATA_M);
5106 clear_bit(chain_idx, result_idx_bm);
5107 chain_idx = find_first_bit(result_idx_bm,
5111 /* fill recipe dependencies */
5112 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
5113 ICE_MAX_NUM_RECIPES);
5114 set_bit(buf[recps].recipe_indx,
5115 (unsigned long *)buf[recps].recipe_bitmap);
5116 content->act_ctrl_fwd_priority = rm->priority;
5118 if (rm->need_pass_l2)
5119 content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
5121 if (rm->allow_pass_l2)
5122 content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
/* Single-group recipe: it is its own root; no chaining needed. */
5126 if (rm->n_grp_count == 1) {
5127 rm->root_rid = buf[0].recipe_indx;
5128 set_bit(buf[0].recipe_indx, rm->r_bitmap);
5129 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5130 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5131 memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5132 sizeof(buf[0].recipe_bitmap));
5137 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5138 * the recipe which is getting created if specified
5139 * by user. Usually any advanced switch filter, which results
5140 * into new extraction sequence, ended up creating a new recipe
5141 * of type ROOT and usually recipes are associated with profiles
5142 * Switch rule referreing newly created recipe, needs to have
5143 * either/or 'fwd' or 'join' priority, otherwise switch rule
5144 * evaluation will not happen correctly. In other words, if
5145 * switch rule to be evaluated on priority basis, then recipe
5146 * needs to have priority, otherwise it will be evaluated last.
5148 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5150 struct ice_recp_grp_entry *last_chain_entry;
5153 /* Allocate the last recipe that will chain the outcomes of the
5154 * other recipes together
5156 status = ice_alloc_recipe(hw, &rid);
5160 content = &buf[recps].content;
5162 buf[recps].recipe_indx = (u8)rid;
5163 content->rid = (u8)rid;
5164 content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5165 /* the new entry created should also be part of rg_list to
5166 * make sure we have complete recipe
5168 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5169 sizeof(*last_chain_entry),
5171 if (!last_chain_entry) {
5175 last_chain_entry->rid = rid;
5176 memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
5177 /* All recipes use look-up index 0 to match switch ID. */
5178 content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5179 content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5180 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5181 content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
5182 content->mask[i] = 0;
5186 /* update r_bitmap with the recp that is used for chaining */
5187 set_bit(rid, rm->r_bitmap);
5188 /* this is the recipe that chains all the other recipes so it
5189 * should not have a chaining ID to indicate the same
5191 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Root recipe matches the result-index outputs of every sub-recipe. */
5192 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5193 last_chain_entry->fv_idx[i] = entry->chain_idx;
5194 content->lkup_indx[i] = entry->chain_idx;
5195 content->mask[i++] = cpu_to_le16(0xFFFF);
5196 set_bit(entry->rid, rm->r_bitmap);
5198 list_add(&last_chain_entry->l_entry, &rm->rg_list);
5199 if (sizeof(buf[recps].recipe_bitmap) >=
5200 sizeof(rm->r_bitmap)) {
5201 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5202 sizeof(buf[recps].recipe_bitmap));
5207 content->act_ctrl_fwd_priority = rm->priority;
5210 rm->root_rid = (u8)rid;
/* Program the assembled recipe buffers under the change lock. */
5212 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5216 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5217 ice_release_change_lock(hw);
5221 /* Every recipe that just got created add it to the recipe
5224 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5225 struct ice_switch_info *sw = hw->switch_info;
5226 bool is_root, idx_found = false;
5227 struct ice_sw_recipe *recp;
5228 u16 idx, buf_idx = 0;
5230 /* find buffer index for copying some data */
5231 for (idx = 0; idx < rm->n_grp_count; idx++)
5232 if (buf[idx].recipe_indx == entry->rid) {
/* Mirror the programmed recipe into SW bookkeeping. */
5242 recp = &sw->recp_list[entry->rid];
5243 is_root = (rm->root_rid == entry->rid);
5244 recp->is_root = is_root;
5246 recp->root_rid = entry->rid;
5247 recp->big_recp = (is_root && rm->n_grp_count > 1);
5249 memcpy(&recp->ext_words, entry->r_group.pairs,
5250 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5252 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5253 sizeof(recp->r_bitmap));
5255 /* Copy non-result fv index values and masks to recipe. This
5256 * call will also update the result recipe bitmask.
5258 ice_collect_result_idx(&buf[buf_idx], recp);
5260 /* for non-root recipes, also copy to the root, this allows
5261 * easier matching of a complete chained recipe
5264 ice_collect_result_idx(&buf[buf_idx],
5265 &sw->recp_list[rm->root_rid]);
5267 recp->n_ext_words = entry->r_group.n_val_pairs;
5268 recp->chain_idx = entry->chain_idx;
5269 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5270 recp->n_grp_count = rm->n_grp_count;
5271 recp->tun_type = rm->tun_type;
5272 recp->need_pass_l2 = rm->need_pass_l2;
5273 recp->allow_pass_l2 = rm->allow_pass_l2;
5274 recp->recp_created = true;
5283 devm_kfree(ice_hw_to_dev(hw), buf);
5288 * ice_create_recipe_group - creates recipe group
5289 * @hw: pointer to hardware structure
5290 * @rm: recipe management list entry
5291 * @lkup_exts: lookup elements
/* Builds the recipe groups for @rm from the extracted words in @lkup_exts
 * (first-fit, via ice_create_first_fit_recp_def) and copies the extraction
 * words/masks into @rm.
 * NOTE(review): the status check after the helper call and the final return
 * are elided in this view.
 */
5294 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5295 struct ice_prot_lkup_ext *lkup_exts)
5300 rm->n_grp_count = 0;
5302 /* Create recipes for words that are marked not done by packing them
5305 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5306 &rm->rg_list, &recp_count);
5308 rm->n_grp_count += recp_count;
5309 rm->n_ext_words = lkup_exts->n_val_words;
5310 memcpy(&rm->ext_words, lkup_exts->fv_words,
5311 sizeof(rm->ext_words));
5312 memcpy(rm->word_masks, lkup_exts->field_mask,
5313 sizeof(rm->word_masks));
5319 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5320 * @hw: pointer to hardware structure
5321 * @rinfo: other information regarding the rule e.g. priority and action info
5322 * @bm: pointer to memory for returning the bitmap of field vectors
/* Maps the rule's tunnel type to an ice_prof_type and fills @bm with the
 * field-vector profiles of that type.
 * NOTE(review): the per-case break statements are elided in this view; each
 * case is assumed to end the switch, not fall through.
 */
5325 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5328 enum ice_prof_type prof_type;
5330 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5332 switch (rinfo->tun_type) {
5334 prof_type = ICE_PROF_NON_TUN;
5336 case ICE_ALL_TUNNELS:
5337 prof_type = ICE_PROF_TUN_ALL;
5339 case ICE_SW_TUN_GENEVE:
5340 case ICE_SW_TUN_VXLAN:
5341 prof_type = ICE_PROF_TUN_UDP;
5343 case ICE_SW_TUN_NVGRE:
5344 prof_type = ICE_PROF_TUN_GRE;
5346 case ICE_SW_TUN_GTPU:
5347 prof_type = ICE_PROF_TUN_GTPU;
5349 case ICE_SW_TUN_GTPC:
5350 prof_type = ICE_PROF_TUN_GTPC;
5352 case ICE_SW_TUN_AND_NON_TUN:
5354 prof_type = ICE_PROF_ALL;
5358 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5362 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5363 * @hw: pointer to hardware structure
5364 * @lkups: lookup elements or match criteria for the advanced recipe, one
5365 * structure per protocol header
5366 * @lkups_cnt: number of protocols
5367 * @rinfo: other information regarding the rule e.g. priority and action info
5368 * @rid: return the recipe ID of the recipe created
/* Top-level recipe creation path: extracts the valid match words, finds
 * compatible field vectors, groups words into recipes, reuses an existing
 * matching recipe if one exists, otherwise programs a new one and associates
 * it with all relevant profiles.  All temporary lists are freed on exit.
 * NOTE(review): error labels, some status checks and several declarations
 * are elided in this view; comments describe the visible code only.
 */
5371 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5372 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5374 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5375 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5376 struct ice_prot_lkup_ext *lkup_exts;
5377 struct ice_recp_grp_entry *r_entry;
5378 struct ice_sw_fv_list_entry *fvit;
5379 struct ice_recp_grp_entry *r_tmp;
5380 struct ice_sw_fv_list_entry *tmp;
5381 struct ice_sw_recipe *rm;
5388 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5392 /* Determine the number of words to be matched and if it exceeds a
5393 * recipe's restrictions
5395 for (i = 0; i < lkups_cnt; i++) {
5398 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5400 goto err_free_lkup_exts;
5403 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5406 goto err_free_lkup_exts;
5410 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5413 goto err_free_lkup_exts;
5416 /* Get field vectors that contain fields extracted from all the protocol
5417 * headers being programmed.
5419 INIT_LIST_HEAD(&rm->fv_list);
5420 INIT_LIST_HEAD(&rm->rg_list);
5422 /* Get bitmap of field vectors (profiles) that are compatible with the
5423 * rule request; only these will be searched in the subsequent call to
5424 * ice_get_sw_fv_list.
5426 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5428 status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5432 /* Group match words into recipes using preferred recipe grouping
5435 status = ice_create_recipe_group(hw, rm, lkup_exts);
5439 /* set the recipe priority if specified */
5440 rm->priority = (u8)rinfo->priority;
5442 rm->need_pass_l2 = rinfo->need_pass_l2;
5443 rm->allow_pass_l2 = rinfo->allow_pass_l2;
5445 /* Find offsets from the field vector. Pick the first one for all the
5448 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5452 /* get bitmap of all profiles the recipe will be associated with */
5453 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5454 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5455 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5456 set_bit((u16)fvit->profile_id, profiles);
5459 /* Look for a recipe which matches our requested fv / mask list */
5460 *rid = ice_find_recp(hw, lkup_exts, rinfo);
5461 if (*rid < ICE_MAX_NUM_RECIPES)
5462 /* Success if found a recipe that match the existing criteria */
5465 rm->tun_type = rinfo->tun_type;
5466 /* Recipe we need does not exist, add a recipe */
5467 status = ice_add_sw_recipe(hw, rm, profiles);
5471 /* Associate all the recipes created with all the profiles in the
5472 * common field vector.
5474 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5475 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read the profile's current recipe association, OR in ours,
 * and write it back under the change lock.
 */
5478 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5479 (u8 *)r_bitmap, NULL);
5483 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5484 ICE_MAX_NUM_RECIPES);
5485 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5489 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5492 ice_release_change_lock(hw);
5497 /* Update profile to recipe bitmap array */
5498 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5499 ICE_MAX_NUM_RECIPES);
5501 /* Update recipe to profile bitmap array */
5502 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5503 set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5506 *rid = rm->root_rid;
5507 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5508 sizeof(*lkup_exts));
/* Tear down the temporary group and field-vector lists. */
5510 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5511 list_del(&r_entry->l_entry);
5512 devm_kfree(ice_hw_to_dev(hw), r_entry);
5515 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5516 list_del(&fvit->list_entry);
5517 devm_kfree(ice_hw_to_dev(hw), fvit);
5520 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5530 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5532 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5533 * @num_vlan: number of VLAN tags
/* Builds a new heap-allocated profile: the original offsets table with
 * 1 or 2 VLAN entries spliced in after the MAC header (later offsets shifted
 * by num_vlan * VLAN_HLEN), and the original packet bytes with the VLAN
 * tag(s) inserted at the ethertype offset.  Returns the new profile (caller
 * must free; ICE_PKT_KMALLOC marks it) or an ERR_PTR.
 * NOTE(review): some allocation-failure cleanup lines and field assignments
 * are elided in this view.
 */
5535 static struct ice_dummy_pkt_profile *
5536 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5539 struct ice_dummy_pkt_profile *profile;
5540 struct ice_dummy_pkt_offsets *offsets;
5541 u32 buf_len, off, etype_off, i;
/* Only single-tag and QinQ (double-tag) insertion is supported. */
5544 if (num_vlan < 1 || num_vlan > 2)
5545 return ERR_PTR(-EINVAL);
5547 off = num_vlan * VLAN_HLEN;
5549 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5550 dummy_pkt->offsets_len;
5551 offsets = kzalloc(buf_len, GFP_KERNEL);
5553 return ERR_PTR(-ENOMEM);
/* Keep the MAC entry, then splice in the VLAN offset entries. */
5555 offsets[0] = dummy_pkt->offsets[0];
5556 if (num_vlan == 2) {
5557 offsets[1] = ice_dummy_qinq_packet_offsets[0];
5558 offsets[2] = ice_dummy_qinq_packet_offsets[1];
5559 } else if (num_vlan == 1) {
5560 offsets[1] = ice_dummy_vlan_packet_offsets[0];
/* Shift every subsequent protocol offset by the inserted tag length. */
5563 for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5564 offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5565 offsets[i + num_vlan].offset =
5566 dummy_pkt->offsets[i].offset + off;
5568 offsets[i + num_vlan] = dummy_pkt->offsets[i];
5570 etype_off = dummy_pkt->offsets[1].offset;
5572 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5574 pkt = kzalloc(buf_len, GFP_KERNEL);
5577 return ERR_PTR(-ENOMEM);
/* Packet bytes: header up to ethertype, VLAN tag(s), then the rest. */
5580 memcpy(pkt, dummy_pkt->pkt, etype_off);
5581 memcpy(pkt + etype_off,
5582 num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5584 memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5585 dummy_pkt->pkt_len - etype_off);
5587 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5591 return ERR_PTR(-ENOMEM);
5594 profile->offsets = offsets;
5596 profile->pkt_len = buf_len;
5597 profile->match |= ICE_PKT_KMALLOC;
5603 * ice_find_dummy_packet - find dummy packet
5605 * @lkups: lookup elements or match criteria for the advanced recipe, one
5606 * structure per protocol header
5607 * @lkups_cnt: number of protocols
5608 * @tun_type: tunnel type
5610 * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
/* Builds an ICE_PKT_* match mask from the tunnel type and the lookup
 * elements, then scans ice_dummy_pkt_profiles for the first profile whose
 * match bits are a subset of the computed mask.  If VLAN lookups were
 * present, a VLAN-augmented copy of the profile is returned instead.
 * NOTE(review): per-case break statements and vlan_count increments are
 * elided in this view.
 */
5612 static const struct ice_dummy_pkt_profile *
5613 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5614 enum ice_sw_tunnel_type tun_type)
5616 const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5617 u32 match = 0, vlan_count = 0;
5621 case ICE_SW_TUN_GTPC:
5622 match |= ICE_PKT_TUN_GTPC;
5624 case ICE_SW_TUN_GTPU:
5625 match |= ICE_PKT_TUN_GTPU;
5627 case ICE_SW_TUN_NVGRE:
5628 match |= ICE_PKT_TUN_NVGRE;
5630 case ICE_SW_TUN_GENEVE:
5631 case ICE_SW_TUN_VXLAN:
5632 match |= ICE_PKT_TUN_UDP;
/* Translate each lookup element into ICE_PKT_* match flags. */
5638 for (i = 0; i < lkups_cnt; i++) {
5639 if (lkups[i].type == ICE_UDP_ILOS)
5640 match |= ICE_PKT_INNER_UDP;
5641 else if (lkups[i].type == ICE_TCP_IL)
5642 match |= ICE_PKT_INNER_TCP;
5643 else if (lkups[i].type == ICE_IPV6_OFOS)
5644 match |= ICE_PKT_OUTER_IPV6;
5645 else if (lkups[i].type == ICE_VLAN_OFOS ||
5646 lkups[i].type == ICE_VLAN_EX)
5648 else if (lkups[i].type == ICE_VLAN_IN)
/* A fully-masked IPv6 ethertype lookup implies an IPv6 header. */
5650 else if (lkups[i].type == ICE_ETYPE_OL &&
5651 lkups[i].h_u.ethertype.ethtype_id ==
5652 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5653 lkups[i].m_u.ethertype.ethtype_id ==
5654 cpu_to_be16(0xFFFF))
5655 match |= ICE_PKT_OUTER_IPV6;
5656 else if (lkups[i].type == ICE_ETYPE_IL &&
5657 lkups[i].h_u.ethertype.ethtype_id ==
5658 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5659 lkups[i].m_u.ethertype.ethtype_id ==
5660 cpu_to_be16(0xFFFF))
5661 match |= ICE_PKT_INNER_IPV6;
5662 else if (lkups[i].type == ICE_IPV6_IL)
5663 match |= ICE_PKT_INNER_IPV6;
5664 else if (lkups[i].type == ICE_GTP_NO_PAY)
5665 match |= ICE_PKT_GTP_NOPAY;
5666 else if (lkups[i].type == ICE_PPPOE) {
5667 match |= ICE_PKT_PPPOE;
5668 if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5670 match |= ICE_PKT_OUTER_IPV6;
5671 } else if (lkups[i].type == ICE_L2TPV3)
5672 match |= ICE_PKT_L2TPV3;
/* Profiles are ordered most-specific first; pick the first whose
 * required bits are all present in @match (terminator has match == 0).
 */
5675 while (ret->match && (match & ret->match) != ret->match)
5678 if (vlan_count != 0)
5679 ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5685 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5687 * @lkups: lookup elements or match criteria for the advanced recipe, one
5688 * structure per protocol header
5689 * @lkups_cnt: number of protocols
5690 * @s_rule: stores rule information from the match criteria
5691 * @profile: dummy packet profile (the template, its size and header offsets)
/* Copies the profile's template packet into the rule buffer, then for each
 * lookup element locates its header offset and merges the caller's header
 * values into the packet under the caller's mask (mask bits set = take the
 * lookup value, clear = keep template byte).
 * NOTE(review): several case labels, break statements and error returns are
 * elided in this view.
 */
5694 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5695 struct ice_sw_rule_lkup_rx_tx *s_rule,
5696 const struct ice_dummy_pkt_profile *profile)
5701 /* Start with a packet with a pre-defined/dummy content. Then, fill
5702 * in the header values to be looked up or matched.
5704 pkt = s_rule->hdr_data;
5706 memcpy(pkt, profile->pkt, profile->pkt_len);
5708 for (i = 0; i < lkups_cnt; i++) {
5709 const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5710 enum ice_protocol_type type;
5711 u16 offset = 0, len = 0, j;
5714 /* find the start of this layer; it should be found since this
5715 * was already checked when search for the dummy packet
5717 type = lkups[i].type;
5718 /* metadata isn't present in the packet */
5719 if (type == ICE_HW_METADATA)
5722 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5723 if (type == offsets[j].type) {
5724 offset = offsets[j].offset;
5729 /* this should never happen in a correct calling sequence */
/* Header length depends on the protocol being matched. */
5733 switch (lkups[i].type) {
5736 len = sizeof(struct ice_ether_hdr);
5740 len = sizeof(struct ice_ethtype_hdr);
5745 len = sizeof(struct ice_vlan_hdr);
5749 len = sizeof(struct ice_ipv4_hdr);
5753 len = sizeof(struct ice_ipv6_hdr);
5758 len = sizeof(struct ice_l4_hdr);
5761 len = sizeof(struct ice_sctp_hdr);
5764 len = sizeof(struct ice_nvgre_hdr);
5768 len = sizeof(struct ice_udp_tnl_hdr);
5770 case ICE_GTP_NO_PAY:
5772 len = sizeof(struct ice_udp_gtp_hdr);
5775 len = sizeof(struct ice_pppoe_hdr);
5778 len = sizeof(struct ice_l2tpv3_sess_hdr);
5784 /* the length should be a word multiple */
5785 if (len % ICE_BYTES_PER_WORD)
5788 /* We have the offset to the header start, the length, the
5789 * caller's header values and mask. Use this information to
5790 * copy the data into the dummy packet appropriately based on
5791 * the mask. Note that we need to only write the bits as
5792 * indicated by the mask to make sure we don't improperly write
5793 * over any significant packet data.
5795 for (j = 0; j < len / sizeof(u16); j++) {
5796 u16 *ptr = (u16 *)(pkt + offset);
5797 u16 mask = lkups[i].m_raw[j];
/* Masked merge: lookup value where mask bits are set. */
5802 ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5806 s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5812 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5813 * @hw: pointer to the hardware structure
5814 * @tun_type: tunnel type
5815 * @pkt: dummy packet to fill in
5816 * @offsets: offset info for the dummy packet
/* For VXLAN/GENEVE rules, writes the currently-open tunnel UDP port into the
 * outer UDP header's destination port field of the dummy packet.
 * NOTE(review): return statements and some declarations are elided in this
 * view.
 */
5819 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5820 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5825 case ICE_SW_TUN_VXLAN:
/* No port to program if no matching tunnel port is open. */
5826 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5829 case ICE_SW_TUN_GENEVE:
5830 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5834 /* Nothing needs to be done for this tunnel type */
5838 /* Find the outer UDP protocol header and insert the port number */
5839 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5840 if (offsets[i].type == ICE_UDP_OF) {
5841 struct ice_l4_hdr *hdr;
5844 offset = offsets[i].offset;
5845 hdr = (struct ice_l4_hdr *)&pkt[offset];
5846 hdr->dst_port = cpu_to_be16(open_port);
5856 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5857 * @hw: pointer to hw structure
5858 * @vlan_type: VLAN tag type
5859 * @pkt: dummy packet to fill in
5860 * @offsets: offset info for the dummy packet
/* Writes @vlan_type (the TPID) into the outer VLAN header of the dummy
 * packet.  Only relevant when double VLAN mode is enabled; otherwise the
 * packet is left untouched.
 * NOTE(review): return statements are elided in this view.
 */
5863 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
5864 const struct ice_dummy_pkt_offsets *offsets)
5868 /* Check if there is something to do */
5869 if (!vlan_type || !ice_is_dvm_ena(hw))
5872 /* Find VLAN header and insert VLAN TPID */
5873 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5874 if (offsets[i].type == ICE_VLAN_OFOS ||
5875 offsets[i].type == ICE_VLAN_EX) {
5876 struct ice_vlan_hdr *hdr;
5879 offset = offsets[i].offset;
5880 hdr = (struct ice_vlan_hdr *)&pkt[offset];
5881 hdr->type = cpu_to_be16(vlan_type);
5890 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
5891 const struct ice_adv_rule_info *second)
5893 return first->sw_act.flag == second->sw_act.flag &&
5894 first->tun_type == second->tun_type &&
5895 first->vlan_type == second->vlan_type &&
5896 first->src_vsi == second->src_vsi &&
5897 first->need_pass_l2 == second->need_pass_l2 &&
5898 first->allow_pass_l2 == second->allow_pass_l2;
5902 * ice_find_adv_rule_entry - Search a rule entry
5903 * @hw: pointer to the hardware structure
5904 * @lkups: lookup elements or match criteria for the advanced recipe, one
5905 * structure per protocol header
5906 * @lkups_cnt: number of protocols
5907 * @recp_id: recipe ID for which we are finding the rule
5908 * @rinfo: other information regarding the rule e.g. priority and action info
5910 * Helper function to search for a given advance rule entry
5911 * Returns pointer to entry storing the rule if found
/* NOTE(review): the break/return statements and the final NULL return are
 * elided in this view; comments describe visible code only.
 */
5913 static struct ice_adv_fltr_mgmt_list_entry *
5914 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5915 u16 lkups_cnt, u16 recp_id,
5916 struct ice_adv_rule_info *rinfo)
5918 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5919 struct ice_switch_info *sw = hw->switch_info;
/* Scan every filter rule attached to this recipe. */
5922 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5924 bool lkups_matched = true;
/* Candidate must have the same lookup count and byte-identical
 * lookup elements, compared per element with memcmp.
 */
5926 if (lkups_cnt != list_itr->lkups_cnt)
5928 for (i = 0; i < list_itr->lkups_cnt; i++)
5929 if (memcmp(&list_itr->lkups[i], &lkups[i],
5931 lkups_matched = false;
/* Rule attributes (action flag, tunnel, VLAN TPID, src VSI,
 * pass-L2 flags) must also match - see ice_rules_equal().
 */
5934 if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
5942 * ice_adv_add_update_vsi_list
5943 * @hw: pointer to the hardware structure
5944 * @m_entry: pointer to current adv filter management list entry
5945 * @cur_fltr: filter information from the book keeping entry
5946 * @new_fltr: filter information with the new VSI to be added
5948 * Call AQ command to add or update previously created VSI list with new VSI.
5950 * Helper function to do book keeping associated with adding filter information
5951 * The algorithm to do the booking keeping is described below :
5952 * When a VSI needs to subscribe to a given advanced filter
5953 * if only one VSI has been added till now
5954 * Allocate a new VSI list and add two VSIs
5955 * to this list using switch rule command
5956 * Update the previously created switch rule with the
5957 * newly created VSI list ID
5958 * if a VSI list was previously created
5959 * Add the new VSI to the previously created VSI list set
5960 * using the update switch rule command
5963 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5964 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5965 struct ice_adv_rule_info *cur_fltr,
5966 struct ice_adv_rule_info *new_fltr)
5968 u16 vsi_list_id = 0;
/* Queue, queue-group and drop actions cannot be turned into a VSI list */
5971 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5972 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5973 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
/* Mixing queue forwarding with VSI(-list) forwarding is rejected */
5976 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5977 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5978 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5979 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5982 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5983 /* Only one entry existed in the mapping and it was not already
5984 * a part of a VSI list. So, create a VSI list with the old and
5987 struct ice_fltr_info tmp_fltr;
5988 u16 vsi_handle_arr[2];
5990 /* A rule already exists with the new VSI being added */
5991 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5992 new_fltr->sw_act.fwd_id.hw_vsi_id)
5995 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5996 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5997 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6003 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6004 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6005 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6006 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6007 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6008 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6010 /* Update the previous switch rule of "forward to VSI" to
6013 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the filter now forwards to the new VSI list */
6017 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6018 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6019 m_entry->vsi_list_info =
6020 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6023 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6025 if (!m_entry->vsi_list_info)
6028 /* A rule already exists with the new VSI being added */
6029 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
6032 /* Update the previously created VSI list set with
6033 * the new VSI ID passed in
6035 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6037 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6039 ice_aqc_opc_update_sw_rules,
6041 /* update VSI list mapping info with new VSI ID */
6043 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
/* Track how many VSIs now subscribe to this filter */
6046 m_entry->vsi_count++;
6050 void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
6052 lkup->type = ICE_HW_METADATA;
6053 lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
6054 cpu_to_be16(ICE_PKT_TUNNEL_MASK);
6057 void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
6059 lkup->type = ICE_HW_METADATA;
6060 lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
6061 cpu_to_be16(ICE_PKT_VLAN_MASK);
6064 void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
6066 lkup->type = ICE_HW_METADATA;
6067 lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
6071 * ice_add_adv_rule - helper function to create an advanced switch rule
6072 * @hw: pointer to the hardware structure
6073 * @lkups: information on the words that needs to be looked up. All words
6074 * together makes one recipe
6075 * @lkups_cnt: num of entries in the lkups array
6076 * @rinfo: other information related to the rule that needs to be programmed
6077 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6078 * ignored in case of error.
6080 * This function can program only 1 rule at a time. The lkups is used to
6081 * describe all the words that form the "lookup" portion of the recipe.
6082 * These words can span multiple protocols. Callers to this function need to
6083 * pass in a list of protocol headers with lookup information along and mask
6084 * that determines which words are valid from the given protocol header.
6085 * rinfo describes other information related to this rule such as forwarding
6086 * IDs, priority of this rule, etc.
6089 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6090 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6091 struct ice_rule_query_data *added_entry)
6093 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6094 struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
6095 const struct ice_dummy_pkt_profile *profile;
6096 u16 rid = 0, i, rule_buf_sz, vsi_handle;
6097 struct list_head *rule_head;
6098 struct ice_switch_info *sw;
6104 /* Initialize profile to result index bitmap */
6105 if (!hw->switch_info->prof_res_bm_init) {
6106 hw->switch_info->prof_res_bm_init = 1;
6107 ice_init_prof_result_bm(hw);
6113 /* get # of words we need to match */
6115 for (i = 0; i < lkups_cnt; i++) {
6118 for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6119 if (lkups[i].m_raw[j])
/* reject rules that need more match words than a chained recipe holds */
6126 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6129 /* locate a dummy packet */
6130 profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6131 if (IS_ERR(profile))
6132 return PTR_ERR(profile);
/* only these filter actions are supported for advanced rules */
6134 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6135 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6136 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6137 rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
6138 rinfo->sw_act.fltr_act == ICE_NOP)) {
6140 goto free_pkt_profile;
6143 vsi_handle = rinfo->sw_act.vsi_handle;
6144 if (!ice_is_vsi_valid(hw, vsi_handle)) {
6146 goto free_pkt_profile;
/* resolve driver VSI handle to the HW VSI number for FWD/NOP actions */
6149 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6150 rinfo->sw_act.fltr_act == ICE_NOP)
6151 rinfo->sw_act.fwd_id.hw_vsi_id =
6152 ice_get_hw_vsi_num(hw, vsi_handle);
6155 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
6157 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* create (or reuse) the recipe that extracts the requested words */
6159 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6161 goto free_pkt_profile;
6162 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6164 /* we have to add VSI to VSI_LIST and increment vsi_count.
6165 * Also Update VSI list so that we can change forwarding rule
6166 * if the rule already exists, we will check if it exists with
6167 * same vsi_id, if not then add it to the VSI list if it already
6168 * exists if not then create a VSI list and add the existing VSI
6169 * ID and the new VSI ID to the list
6170 * We will add that VSI to the list
6172 status = ice_adv_add_update_vsi_list(hw, m_entry,
6173 &m_entry->rule_info,
6176 added_entry->rid = rid;
6177 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6178 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6180 goto free_pkt_profile;
/* no matching entry - build a fresh switch rule sized for the dummy pkt */
6182 rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6183 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6186 goto free_pkt_profile;
/* default action flags unless the caller supplied explicit ones */
6188 if (!rinfo->flags_info.act_valid) {
6189 act |= ICE_SINGLE_ACT_LAN_ENABLE;
6190 act |= ICE_SINGLE_ACT_LB_ENABLE;
6192 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6193 ICE_SINGLE_ACT_LB_ENABLE);
/* encode the filter action into the single-action word */
6196 switch (rinfo->sw_act.fltr_act) {
6197 case ICE_FWD_TO_VSI:
6198 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6199 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6200 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6203 act |= ICE_SINGLE_ACT_TO_Q;
6204 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6205 ICE_SINGLE_ACT_Q_INDEX_M;
6207 case ICE_FWD_TO_QGRP:
6208 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6209 (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6210 act |= ICE_SINGLE_ACT_TO_Q;
6211 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6212 ICE_SINGLE_ACT_Q_INDEX_M;
6213 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6214 ICE_SINGLE_ACT_Q_REGION_M;
6216 case ICE_DROP_PACKET:
6217 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6218 ICE_SINGLE_ACT_VALID_BIT;
6221 act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
6222 rinfo->sw_act.fwd_id.hw_vsi_id);
/* NOP: clear the valid bit so the action is not applied */
6223 act &= ~ICE_SINGLE_ACT_VALID_BIT;
6227 goto err_ice_add_adv_rule;
6230 /* If there is no matching criteria for direction there
6231 * is only one difference between Rx and Tx:
6232 * - get switch id base on VSI number from source field (Tx)
6233 * - get switch id base on port number (Rx)
6235 * If matching on direction metadata is chosen, rule direction is
6236 * extracted from type value set here.
6238 if (rinfo->sw_act.flag & ICE_FLTR_TX) {
6239 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6240 s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6242 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6243 s_rule->src = cpu_to_le16(hw->port_info->lport);
6246 s_rule->recipe_id = cpu_to_le16(rid);
6247 s_rule->act = cpu_to_le32(act);
/* copy the dummy packet and patch in the caller's match values */
6249 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6251 goto err_ice_add_adv_rule;
6253 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
6256 goto err_ice_add_adv_rule;
6258 status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
6262 goto err_ice_add_adv_rule;
/* program the rule in HW via the admin queue */
6264 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6265 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6268 goto err_ice_add_adv_rule;
/* build the book-keeping entry mirroring what HW now holds */
6269 adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6270 sizeof(struct ice_adv_fltr_mgmt_list_entry),
6274 goto err_ice_add_adv_rule;
6277 adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6278 lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6279 if (!adv_fltr->lkups) {
6281 goto err_ice_add_adv_rule;
6284 adv_fltr->lkups_cnt = lkups_cnt;
6285 adv_fltr->rule_info = *rinfo;
6286 adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6287 sw = hw->switch_info;
6288 sw->recp_list[rid].adv_rule = true;
6289 rule_head = &sw->recp_list[rid].filt_rules;
6291 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6292 adv_fltr->vsi_count = 1;
6294 /* Add rule entry to book keeping list */
6295 list_add(&adv_fltr->list_entry, rule_head);
6297 added_entry->rid = rid;
6298 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6299 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6301 err_ice_add_adv_rule:
/* on failure, release the partially-built book-keeping entry */
6302 if (status && adv_fltr) {
6303 devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6304 devm_kfree(ice_hw_to_dev(hw), adv_fltr);
/* dummy-packet profiles built at runtime are heap-allocated - free them */
6310 if (profile->match & ICE_PKT_KMALLOC) {
6311 kfree(profile->offsets);
6312 kfree(profile->pkt);
6320 * ice_replay_vsi_fltr - Replay filters for requested VSI
6321 * @hw: pointer to the hardware structure
6322 * @vsi_handle: driver VSI handle
6323 * @recp_id: Recipe ID for which rules need to be replayed
6324 * @list_head: list for which filters need to be replayed
6326 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6327 * It is required to pass valid VSI handle.
6330 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6331 struct list_head *list_head)
6333 struct ice_fltr_mgmt_list_entry *itr;
6337 if (list_empty(list_head))
6339 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6341 list_for_each_entry(itr, list_head, list_entry) {
6342 struct ice_fltr_list_entry f_entry;
6344 f_entry.fltr_info = itr->fltr_info;
/* single-VSI, non-VLAN filter owned by this VSI: replay directly */
6345 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6346 itr->fltr_info.vsi_handle == vsi_handle) {
6347 /* update the src in case it is VSI num */
6348 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6349 f_entry.fltr_info.src = hw_vsi_id;
6350 status = ice_add_rule_internal(hw, recp_id, &f_entry);
/* VSI-list filter: only replay when this VSI is a list member */
6355 if (!itr->vsi_list_info ||
6356 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6358 /* Clearing it so that the logic can add it back */
6359 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6360 f_entry.fltr_info.vsi_handle = vsi_handle;
6361 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6362 /* update the src in case it is VSI num */
6363 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6364 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters go through the VLAN-specific add path */
6365 if (recp_id == ICE_SW_LKUP_VLAN)
6366 status = ice_add_vlan_internal(hw, &f_entry);
6368 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6377 * ice_adv_rem_update_vsi_list
6378 * @hw: pointer to the hardware structure
6379 * @vsi_handle: VSI handle of the VSI to remove
6380 * @fm_list: filter management entry for which the VSI list management needs to
6384 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6385 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6387 struct ice_vsi_list_map_info *vsi_list_info;
6388 enum ice_sw_lkup_type lkup_type;
/* only meaningful for VSI-list rules that still have subscribers */
6392 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6393 fm_list->vsi_count == 0)
6396 /* A rule with the VSI being removed does not exist */
6397 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6400 lkup_type = ICE_SW_LKUP_LAST;
6401 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* remove this VSI from the HW VSI list (last arg true = remove) */
6402 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6403 ice_aqc_opc_update_sw_rules,
6408 fm_list->vsi_count--;
6409 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6410 vsi_list_info = fm_list->vsi_list_info;
/* one subscriber left: collapse the list back to plain FWD_TO_VSI */
6411 if (fm_list->vsi_count == 1) {
6412 struct ice_fltr_info tmp_fltr;
6415 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6417 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6420 /* Make sure VSI list is empty before removing it below */
6421 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6423 ice_aqc_opc_update_sw_rules,
6428 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6429 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6430 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6431 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6432 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6433 tmp_fltr.fwd_id.hw_vsi_id =
6434 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6435 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6436 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6437 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6439 /* Update the previous switch rule of "MAC forward to VSI" to
6440 * "MAC fwd to VSI list"
6442 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6444 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6445 tmp_fltr.fwd_id.hw_vsi_id, status);
6448 fm_list->vsi_list_info->ref_cnt--;
6450 /* Remove the VSI list since it is no longer used */
6451 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6453 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6454 vsi_list_id, status);
/* drop the now-unused VSI list map from the book-keeping */
6458 list_del(&vsi_list_info->list_entry);
6459 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6460 fm_list->vsi_list_info = NULL;
6467 * ice_rem_adv_rule - removes existing advanced switch rule
6468 * @hw: pointer to the hardware structure
6469 * @lkups: information on the words that needs to be looked up. All words
6470 * together makes one recipe
6471 * @lkups_cnt: num of entries in the lkups array
6472 * @rinfo: pointer to the rule information for the rule being removed
6474 * This function can be used to remove 1 rule at a time. The lkups is
6475 * used to describe all the words that forms the "lookup" portion of the
6476 * rule. These words can span multiple protocols. Callers to this function
6477 * need to pass in a list of protocol headers with lookup information along
6478 * and mask that determines which words are valid from the given protocol
6479 * header. rinfo describes other information related to this rule such as
6480 * forwarding IDs, priority of this rule, etc.
6483 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6484 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6486 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6487 struct ice_prot_lkup_ext lkup_exts;
6488 bool remove_rule = false;
6489 struct mutex *rule_lock; /* Lock to protect filter rule list */
6490 u16 i, rid, vsi_handle;
/* rebuild the extraction-word list to identify the owning recipe */
6493 memset(&lkup_exts, 0, sizeof(lkup_exts));
6494 for (i = 0; i < lkups_cnt; i++) {
6497 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6500 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6505 rid = ice_find_recp(hw, &lkup_exts, rinfo);
6506 /* If did not find a recipe that match the existing criteria */
6507 if (rid == ICE_MAX_NUM_RECIPES)
6510 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6511 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6512 /* the rule is already removed */
6515 mutex_lock(rule_lock);
6516 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6518 } else if (list_elem->vsi_count > 1) {
/* other VSIs still subscribe: just drop this VSI from the list */
6519 remove_rule = false;
6520 vsi_handle = rinfo->sw_act.vsi_handle;
6521 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6523 vsi_handle = rinfo->sw_act.vsi_handle;
6524 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6526 mutex_unlock(rule_lock);
6529 if (list_elem->vsi_count == 0)
6532 mutex_unlock(rule_lock);
6534 struct ice_sw_rule_lkup_rx_tx *s_rule;
/* remove request carries no packet header, only the rule index */
6537 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6538 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6542 s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6543 s_rule->hdr_len = 0;
6544 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6546 ice_aqc_opc_remove_sw_rules, NULL);
/* -ENOENT means HW no longer has the rule; clean book-keeping anyway */
6547 if (!status || status == -ENOENT) {
6548 struct ice_switch_info *sw = hw->switch_info;
6550 mutex_lock(rule_lock);
6551 list_del(&list_elem->list_entry);
6552 devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6553 devm_kfree(ice_hw_to_dev(hw), list_elem);
6554 mutex_unlock(rule_lock);
/* last rule gone: this recipe no longer hosts advanced rules */
6555 if (list_empty(&sw->recp_list[rid].filt_rules))
6556 sw->recp_list[rid].adv_rule = false;
6564 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6565 * @hw: pointer to the hardware structure
6566 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6568 * This function is used to remove 1 rule at a time. The removal is based on
6569 * the remove_entry parameter. This function will remove rule for a given
6570 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6573 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6574 struct ice_rule_query_data *remove_entry)
6576 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6577 struct list_head *list_head;
6578 struct ice_adv_rule_info rinfo;
6579 struct ice_switch_info *sw;
6581 sw = hw->switch_info;
/* recipe must exist before its rule list can be searched */
6582 if (!sw->recp_list[remove_entry->rid].recp_created)
6584 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6585 list_for_each_entry(list_itr, list_head, list_entry) {
6586 if (list_itr->rule_info.fltr_rule_id ==
6587 remove_entry->rule_id) {
6588 rinfo = list_itr->rule_info;
/* remove on behalf of the caller's VSI, not the stored one */
6589 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6590 return ice_rem_adv_rule(hw, list_itr->lkups,
6591 list_itr->lkups_cnt, &rinfo);
6594 /* either list is empty or unable to find rule */
6599 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6600 * @hw: pointer to the hardware structure
6601 * @vsi_handle: driver VSI handle
6602 * @list_head: list for which filters need to be replayed
6604 * Replay the advanced rule for the given VSI.
6607 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6608 struct list_head *list_head)
6610 struct ice_rule_query_data added_entry = { 0 };
6611 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6614 if (list_empty(list_head))
6616 list_for_each_entry(adv_fltr, list_head, list_entry) {
6617 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6618 u16 lk_cnt = adv_fltr->lkups_cnt;
/* only replay rules that belong to the requested VSI */
6620 if (vsi_handle != rinfo->sw_act.vsi_handle)
6622 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6631 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6632 * @hw: pointer to the hardware structure
6633 * @vsi_handle: driver VSI handle
6635 * Replays filters for requested VSI via vsi_handle.
6637 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6639 struct ice_switch_info *sw = hw->switch_info;
6643 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6644 struct list_head *head;
6646 head = &sw->recp_list[i].filt_replay_rules;
/* basic recipes replay via the simple path, advanced via their own */
6647 if (!sw->recp_list[i].adv_rule)
6648 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6650 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6658 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6659 * @hw: pointer to the HW struct
6661 * Deletes the filter replay rules.
6663 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6665 struct ice_switch_info *sw = hw->switch_info;
6671 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6672 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6673 struct list_head *l_head;
6675 l_head = &sw->recp_list[i].filt_replay_rules;
/* basic vs advanced rules are torn down by different helpers */
6676 if (!sw->recp_list[i].adv_rule)
6677 ice_rem_sw_rule_info(hw, l_head);
6679 ice_rem_adv_rule_info(hw, l_head);