1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
5 #include "ice_switch.h"
/* Byte offsets into the dummy Ethernet header used by switch filter rules */
#define ICE_ETH_DA_OFFSET 0		/* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET 12	/* EtherType field (no VLAN tag) */
#define ICE_ETH_VLAN_TCI_OFFSET 14	/* VLAN TCI when an 802.1Q tag is present */
#define ICE_MAX_VLAN_ID 0xFFF		/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID 0x86DD	/* EtherType for IPv6 */
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN 16	/* DA + SA + EtherType/VLAN tag bytes */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes one protocol header inside a dummy packet: which protocol it is
 * and at which byte offset it starts. Tables of these are terminated by an
 * ICE_PROTOCOL_LAST entry.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE tunnel carrying inner TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00, /* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE tunnel carrying inner UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00, /* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GPE) + inner TCP packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for UDP tunnel (dest port 4789 = 0x12b5) with inner TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00, /* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,
	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GPE) + inner UDP packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for UDP tunnel (dest port 4789 = 0x12b5) with inner UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00, /* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,
	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00, /* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_VLAN_OFOS, 12 },
	{ ICE_ETYPE_OL, 16 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
	0x08, 0x00, /* ICE_ETYPE_OL 16 */
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00, /* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_VLAN_OFOS, 12 },
	{ ICE_ETYPE_OL, 16 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
	0x08, 0x00, /* ICE_ETYPE_OL 16 */
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD, /* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_VLAN_OFOS, 12 },
	{ ICE_ETYPE_OL, 16 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
	0x86, 0xDD, /* ICE_ETYPE_OL 16 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD, /* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_VLAN_OFOS, 12 },
	{ ICE_ETYPE_OL, 16 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
	0x86, 0xDD, /* ICE_ETYPE_OL 16 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Sizes (in bytes) of the variable-length AQ switch-rule buffers:
 * header portion plus the trailing flexible-array payload.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
int ice_init_def_sw_recp(struct ice_hw *hw)
	struct ice_sw_recipe *recps;
	/* one entry per possible recipe; freed via devm on driver detach */
	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	hw->switch_info->recp_list = recps;
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* echo back FW's continuation cookie and element count */
	*req_desc = le16_to_cpu(cmd->element);
	*num_elems = le16_to_cpu(cmd->num_elems);
/**
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
	/* when not allocating from the FW pool, request a specific VSI num */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;
	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* capture FW-assigned VSI number and pool accounting */
	vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
	vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
/**
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* update pool accounting from FW response */
	vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
/**
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* update pool accounting from FW response */
	vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
	/* valid iff the handle is in range and a context was saved for it */
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
	return hw->vsi_ctx[vsi_handle]->vsi_num;
/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	/* NULL for out-of-range handles; may also be NULL if never saved */
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
	hw->vsi_ctx[vsi_handle] = vsi;
/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* free LAN and RDMA queue context arrays per traffic class */
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		if (vsi->rdma_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
			vsi->rdma_q_ctx[i] = NULL;
/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	ice_clear_vsi_q_ctx(hw, vsi_handle);
	devm_kfree(ice_hw_to_dev(hw), vsi);
	hw->vsi_ctx[vsi_handle] = NULL;	/* prevent use-after-free via handle */
/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
	struct ice_vsi_ctx *tmp_vsi_ctx;
	if (vsi_handle >= ICE_MAX_VSI)
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
			/* roll back the HW add if we cannot track it */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
	if (!ice_is_vsi_valid(hw, vsi_handle))
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	ice_clear_vsi_ctx(hw, vsi_handle);
/**
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	if (!ice_is_vsi_valid(hw, vsi_handle))
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 */
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
	struct ice_vsi_ctx *ctx;
	ctx = ice_get_vsi_ctx(hw, vsi_handle);
	/* toggle the protocol-engine filter enable bit */
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	sw_buf->num_elems = cpu_to_le16(1);
	/* pick the resource type from the lookup type: replication list for
	 * MAC/ethertype/promisc style lookups, prune list for VLAN
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
		goto ice_aq_alloc_free_vsi_list_exit;
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
		goto ice_aq_alloc_free_vsi_list_exit;
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
	struct ice_aq_desc desc;
	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* ENOENT from FW on update/remove means the rule does not exist */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 */
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	buf_size = num_recipes * sizeof(*s_recipe_list);
	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	if (*num_recipes != ICE_MAX_NUM_RECIPES)
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;
	buf_size = *num_recipes * sizeof(*s_recipe_list);
	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
 */
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* copy out the recipe association bitmap returned by FW */
	memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	sw_buf->num_elems = cpu_to_le16(1);
	/* request one shared recipe resource */
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* skip profiles FW has no association for */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		/* mirror the association into the reverse map */
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
1209 * ice_collect_result_idx - copy result index values
1210 * @buf: buffer that contains the result index
1211 * @recp: the recipe struct to copy data into
/* If @buf carries a valid result index (RESULT_EN flag set), record the
 * index (flag bit stripped) in the recipe's bookkeeping bitmap.
 * NOTE(review): the set_bit() destination argument is on an elided
 * line — presumably recp->res_idxs; confirm against the full source.
 */
 1214 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
 1215 struct ice_sw_recipe *recp)
 1217 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
 1218 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1223 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1224 * @hw: pointer to hardware structure
1225 * @recps: struct that we need to populate
1226 * @rid: recipe ID that we are populating
1227 * @refresh_required: true if we should get recipe to profile mapping from FW
1229 * This function is used to populate all the necessary entries into our
1230 * bookkeeping so that we have a current list of all the recipes that are
1231 * programmed in the firmware.
/* Read recipe @rid (and its chained sub-recipes) from FW and populate
 * the driver's SW bookkeeping in @recps: lookup extraction words,
 * rg_list group entries, result-index bitmaps and root-recipe fields.
 * NOTE(review): this listing elides many lines (local declarations,
 * error paths, kfree of tmp, "continue"/"break" statements, closing
 * braces); treat the flow below as a partial view and verify against
 * the full ice_switch.c source before editing.
 */
 1234 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
 1235 bool *refresh_required)
 1237 DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
 1238 struct ice_aqc_recipe_data_elem *tmp;
 1239 u16 num_recps = ICE_MAX_NUM_RECIPES;
 1240 struct ice_prot_lkup_ext *lkup_exts;
 1245 bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
 1247 /* we need a buffer big enough to accommodate all the recipes */
 1248 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
 1252 tmp[0].recipe_indx = rid;
 1253 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
 1254 /* non-zero status meaning recipe doesn't exist */
 1258 /* Get recipe to profile map so that we can get the fv from lkups that
 1259 * we read for a recipe from FW. Since we want to minimize the number of
 1260 * times we make this FW call, just make one call and cache the copy
 1261 * until a new recipe is added. This operation is only required the
 1262 * first time to get the changes from FW. Then to search existing
 1263 * entries we don't need to update the cache again until another recipe
 1266 if (*refresh_required) {
 1267 ice_get_recp_to_prof_map(hw);
 1268 *refresh_required = false;
 1271 /* Start populating all the entries for recps[rid] based on lkups from
 1272 * firmware. Note that we are only creating the root recipe in our
 1275 lkup_exts = &recps[rid].lkup_exts;
 /* one iteration per sub-recipe returned by FW */
 1277 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
 1278 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
 1279 struct ice_recp_grp_entry *rg_entry;
 1280 u8 i, prof, idx, prot = 0;
 1284 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
 1291 idx = root_bufs.recipe_indx;
 1292 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
 1294 /* Mark all result indices in this chain */
 1295 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
 1296 set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
 1299 /* get the first profile that is associated with rid */
 1300 prof = find_first_bit(recipe_to_profile[idx],
 1301 ICE_MAX_NUM_PROFILES)
 1302 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
 /* lkup_indx[0] is skipped: words start at index 1 */
 1303 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
 1305 rg_entry->fv_idx[i] = lkup_indx;
 1306 rg_entry->fv_mask[i] =
 1307 le16_to_cpu(root_bufs.content.mask[i + 1]);
 1309 /* If the recipe is a chained recipe then all its
 1310 * child recipe's result will have a result index.
 1311 * To fill fv_words we should not use those result
 1312 * index, we only need the protocol ids and offsets.
 1313 * We will skip all the fv_idx which stores result
 1314 * index in them. We also need to skip any fv_idx which
 1315 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
 1316 * valid offset value.
 1318 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
 1319 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
 1320 rg_entry->fv_idx[i] == 0)
 /* translate the FV index to a protocol ID + offset pair */
 1323 ice_find_prot_off(hw, ICE_BLK_SW, prof,
 1324 rg_entry->fv_idx[i], &prot, &off);
 1325 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
 1326 lkup_exts->fv_words[fv_word_idx].off = off;
 1327 lkup_exts->field_mask[fv_word_idx] =
 1328 rg_entry->fv_mask[i];
 1331 /* populate rg_list with the data from the child entry of this
 1334 list_add(&rg_entry->l_entry, &recps[rid].rg_list);
 1336 /* Propagate some data to the recipe database */
 1337 recps[idx].is_root = !!is_root;
 1338 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
 1339 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
 1340 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
 1341 recps[idx].chain_idx = root_bufs.content.result_indx &
 1342 ~ICE_AQ_RECIPE_RESULT_EN;
 1343 set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
 /* presumably the else-branch for no RESULT_EN — elided lines */
 1345 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
 1351 /* Only do the following for root recipes entries */
 1352 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
 1353 sizeof(recps[idx].r_bitmap));
 1354 recps[idx].root_rid = root_bufs.content.rid &
 1355 ~ICE_AQ_RECIPE_ID_IS_ROOT;
 1356 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
 1359 /* Complete initialization of the root recipe entry */
 1360 lkup_exts->n_val_words = fv_word_idx;
 1361 recps[rid].big_recp = (num_recps > 1);
 1362 recps[rid].n_grp_count = (u8)num_recps;
 /* keep a devres-managed copy of the raw FW recipe buffer */
 1363 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
 1364 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
 1366 if (!recps[rid].root_buf) {
 1371 /* Copy result indexes */
 1372 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
 1373 recps[rid].recp_created = true;
1380 /* ice_init_port_info - Initialize port_info with switch configuration data
1381 * @pi: pointer to port_info
1382 * @vsi_port_num: VSI number or port number
1383 * @type: Type of switch element (port or VSI)
1384 * @swid: switch ID of the switch the element is attached to
1385 * @pf_vf_num: PF or VF number
1386 * @is_vf: true if the element is a VF, false otherwise
/* Fill @pi from one switch-configuration response element. Only the
 * physical-port case is visible here; other element types fall through
 * to the debug message.
 * NOTE(review): the switch(type) line, "break"s and closing braces are
 * elided in this listing.
 */
 1389 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
 1390 u16 swid, u16 pf_vf_num, bool is_vf)
 1393 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
 1394 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
 1396 pi->pf_vf_num = pf_vf_num;
 /* no default VSIs known yet at init time */
 1398 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
 1399 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
 1402 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
1407 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1408 * @hw: pointer to the hardware structure
/* Query FW for the initial switch configuration, looping until FW
 * reports no continuation (req_desc == 0), and initialize port_info
 * from the physical-port elements found.
 * NOTE(review): local declarations (req_desc, num_elems, status, i,
 * is_vf, res_type), the do{ opener, "continue" statements and the
 * final return are elided in this listing.
 */
 1410 int ice_get_initial_sw_cfg(struct ice_hw *hw)
 1412 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
 1418 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
 1424 /* Multiple calls to ice_aq_get_sw_cfg may be required
 1425 * to get all the switch configuration information. The need
 1426 * for additional calls is indicated by ice_aq_get_sw_cfg
 1427 * writing a non-zero value in req_desc
 1430 struct ice_aqc_get_sw_cfg_resp_elem *ele;
 1432 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
 1433 &req_desc, &num_elems, NULL);
 1438 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
 1439 u16 pf_vf_num, swid, vsi_port_num;
 /* strip the type bits to get the raw VSI/port number */
 1443 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
 1444 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
 1446 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
 1447 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
 1449 swid = le16_to_cpu(ele->swid);
 1451 if (le16_to_cpu(ele->pf_vf_num) &
 1452 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
 1455 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
 1456 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
 1458 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
 1459 /* FW VSI is not needed. Just continue. */
 1463 ice_init_port_info(hw->port_info, vsi_port_num,
 1464 res_type, swid, pf_vf_num, is_vf);
 1466 } while (req_desc && !status);
 1468 devm_kfree(ice_hw_to_dev(hw), rbuf);
1473 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1474 * @hw: pointer to the hardware structure
1475 * @fi: filter info structure to fill/update
1477 * This helper function populates the lb_en and lan_en elements of the provided
1478 * ice_fltr_info struct using the switch's type and characteristics of the
1479 * switch rule being configured.
/* Derive the lb_en/lan_en loopback and LAN-enable flags for a Tx
 * forwarding filter from the switch's VEB/VEPA characteristics and the
 * filter's lookup type.
 * NOTE(review): the assignments to fi->lb_en / fi->lan_en and closing
 * braces sit on elided lines in this listing.
 */
 1481 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
 /* only Tx filters with a forwarding action need these flags */
 1485 if ((fi->flag & ICE_FLTR_TX) &&
 1486 (fi->fltr_act == ICE_FWD_TO_VSI ||
 1487 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
 1488 fi->fltr_act == ICE_FWD_TO_Q ||
 1489 fi->fltr_act == ICE_FWD_TO_QGRP)) {
 1490 /* Setting LB for prune actions will result in replicated
 1491 * packets to the internal switch that will be dropped.
 1493 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
 1496 /* Set lan_en to TRUE if
 1497 * 1. The switch is a VEB AND
 1499 * 2.1 The lookup is a directional lookup like ethertype,
 1500 * promiscuous, ethertype-MAC, promiscuous-VLAN
 1501 * and default-port OR
 1502 * 2.2 The lookup is VLAN, OR
 1503 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
 1504 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
 1508 * The switch is a VEPA.
 1510 * In all other cases, the LAN enable has to be set to false.
 1513 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
 1514 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
 1515 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 1516 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
 1517 fi->lkup_type == ICE_SW_LKUP_DFLT ||
 1518 fi->lkup_type == ICE_SW_LKUP_VLAN ||
 1519 (fi->lkup_type == ICE_SW_LKUP_MAC &&
 1520 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
 1521 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
 1522 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
1531 * ice_fill_sw_rule - Helper function to fill switch rule structure
1532 * @hw: pointer to the hardware structure
1533 * @f_info: entry containing packet forwarding information
1534 * @s_rule: switch rule structure to be filled in based on mac_entry
1535 * @opc: switch rules population command type - pass in the command opcode
/* Populate an AQ switch-rule element from @f_info: build the action
 * word, pick the recipe, and patch the dummy Ethernet header with the
 * DA / ethertype / VLAN TCI the filter matches on.
 * NOTE(review): local declarations (act, daddr, off, eth_hdr,
 * eth_hdr_sz, q_rgn), early "return", "break" statements and several
 * case labels are on elided lines in this listing.
 */
 1538 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
 1539 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
 /* sentinel: > ICE_MAX_VLAN_ID means "no VLAN to program" */
 1541 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
 /* removal only needs the rule index — no header or action */
 1549 if (opc == ice_aqc_opc_remove_sw_rules) {
 1550 s_rule->pdata.lkup_tx_rx.act = 0;
 1551 s_rule->pdata.lkup_tx_rx.index =
 1552 cpu_to_le16(f_info->fltr_rule_id);
 1553 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
 1557 eth_hdr_sz = sizeof(dummy_eth_header);
 1558 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
 1560 /* initialize the ether header with a dummy header */
 1561 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
 1562 ice_fill_sw_info(hw, f_info);
 1564 switch (f_info->fltr_act) {
 1565 case ICE_FWD_TO_VSI:
 1566 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
 1567 ICE_SINGLE_ACT_VSI_ID_M;
 /* VLAN lookups use prune semantics instead of plain forwarding */
 1568 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
 1569 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
 1570 ICE_SINGLE_ACT_VALID_BIT;
 1572 case ICE_FWD_TO_VSI_LIST:
 1573 act |= ICE_SINGLE_ACT_VSI_LIST;
 1574 act |= (f_info->fwd_id.vsi_list_id <<
 1575 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
 1576 ICE_SINGLE_ACT_VSI_LIST_ID_M;
 1577 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
 1578 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
 1579 ICE_SINGLE_ACT_VALID_BIT;
 1582 act |= ICE_SINGLE_ACT_TO_Q;
 1583 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
 1584 ICE_SINGLE_ACT_Q_INDEX_M;
 1586 case ICE_DROP_PACKET:
 1587 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
 1588 ICE_SINGLE_ACT_VALID_BIT;
 1590 case ICE_FWD_TO_QGRP:
 /* queue-group size is encoded as log2 of the group size */
 1591 q_rgn = f_info->qgrp_size > 0 ?
 1592 (u8)ilog2(f_info->qgrp_size) : 0;
 1593 act |= ICE_SINGLE_ACT_TO_Q;
 1594 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
 1595 ICE_SINGLE_ACT_Q_INDEX_M;
 1596 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
 1597 ICE_SINGLE_ACT_Q_REGION_M;
 /* flags computed earlier by ice_fill_sw_info() */
 1604 act |= ICE_SINGLE_ACT_LB_ENABLE;
 1606 act |= ICE_SINGLE_ACT_LAN_ENABLE;
 1608 switch (f_info->lkup_type) {
 1609 case ICE_SW_LKUP_MAC:
 1610 daddr = f_info->l_data.mac.mac_addr;
 1612 case ICE_SW_LKUP_VLAN:
 1613 vlan_id = f_info->l_data.vlan.vlan_id;
 1614 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
 1615 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
 1616 act |= ICE_SINGLE_ACT_PRUNE;
 1617 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
 1620 case ICE_SW_LKUP_ETHERTYPE_MAC:
 1621 daddr = f_info->l_data.ethertype_mac.mac_addr;
 1623 case ICE_SW_LKUP_ETHERTYPE:
 /* write the ethertype into the dummy header in network order */
 1624 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
 1625 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
 1627 case ICE_SW_LKUP_MAC_VLAN:
 1628 daddr = f_info->l_data.mac_vlan.mac_addr;
 1629 vlan_id = f_info->l_data.mac_vlan.vlan_id;
 1631 case ICE_SW_LKUP_PROMISC_VLAN:
 1632 vlan_id = f_info->l_data.mac_vlan.vlan_id;
 1634 case ICE_SW_LKUP_PROMISC:
 1635 daddr = f_info->l_data.mac_vlan.mac_addr;
 1641 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
 1642 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
 1643 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
 1645 /* Recipe set depending on lookup type */
 1646 s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
 1647 s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
 1648 s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
 /* presumably guarded by "if (daddr)" on an elided line */
 1651 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
 /* only program the TCI when a real VLAN ID (<= 0xFFF) was set */
 1653 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
 1654 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
 1655 *off = cpu_to_be16(vlan_id);
 1658 /* Create the switch rule with the final dummy Ethernet header */
 1659 if (opc != ice_aqc_opc_update_sw_rules)
 1660 s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
1664 * ice_add_marker_act
1665 * @hw: pointer to the hardware structure
1666 * @m_ent: the management entry for which sw marker needs to be added
1667 * @sw_marker: sw marker to tag the Rx descriptor with
1668 * @l_id: large action resource ID
1670 * Create a large action to hold software marker and update the switch rule
1671 * entry pointed by m_ent with newly created large action
/* Build a large action (FWD + profile-ID generic + marker generic) and
 * update the existing single-act rule in @m_ent to point at it, so Rx
 * descriptors get tagged with @sw_marker.
 * NOTE(review): local declarations (status, act, id, lg_act_size,
 * rules_size), error-return lines and the status check before the
 * bookkeeping update are elided in this listing.
 */
 1674 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 1675 u16 sw_marker, u16 l_id)
 1677 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
 1678 /* For software marker we need 3 large actions
 1679 * 1. FWD action: FWD TO VSI or VSI LIST
 1680 * 2. GENERIC VALUE action to hold the profile ID
 1681 * 3. GENERIC VALUE action to hold the software marker ID
 1683 const u16 num_lg_acts = 3;
 /* markers are only supported on MAC lookup rules */
 1690 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
 1693 /* Create two back-to-back switch rules and submit them to the HW using
 1694 * one memory buffer:
 1698 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
 1699 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
 1700 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
 /* second rule lives immediately after the large action in the buffer */
 1704 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
 1706 /* Fill in the first switch rule i.e. large action */
 1707 lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
 1708 lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
 1709 lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
 1711 /* First action VSI forwarding or VSI list forwarding depending on how
 1714 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
 1715 m_ent->fltr_info.fwd_id.hw_vsi_id;
 1717 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
 1718 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
 1719 if (m_ent->vsi_count > 1)
 1720 act |= ICE_LG_ACT_VSI_LIST;
 1721 lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
 1723 /* Second action descriptor type */
 1724 act = ICE_LG_ACT_GENERIC;
 1726 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 1727 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 1729 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
 1730 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 1732 /* Third action Marker value */
 1733 act |= ICE_LG_ACT_GENERIC;
 1734 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
 1735 ICE_LG_ACT_GENERIC_VALUE_M;
 1737 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 1739 /* call the fill switch rule to fill the lookup Tx Rx structure */
 1740 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
 1741 ice_aqc_opc_update_sw_rules);
 1743 /* Update the action to point to the large action ID */
 1744 rx_tx->pdata.lkup_tx_rx.act =
 1745 cpu_to_le32(ICE_SINGLE_ACT_PTR |
 1746 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
 1747 ICE_SINGLE_ACT_PTR_VAL_M));
 1749 /* Use the filter rule ID of the previously created rule with single
 1750 * act. Once the update happens, hardware will treat this as large
 1753 rx_tx->pdata.lkup_tx_rx.index =
 1754 cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
 /* submit both rules (large action + updated lookup) in one AQ call */
 1756 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
 1757 ice_aqc_opc_update_sw_rules, NULL);
 1759 m_ent->lg_act_idx = l_id;
 1760 m_ent->sw_marker_id = sw_marker;
 1763 devm_kfree(ice_hw_to_dev(hw), lg_act);
1768 * ice_create_vsi_list_map
1769 * @hw: pointer to the hardware structure
1770 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1771 * @num_vsi: number of VSI handles in the array
1772 * @vsi_list_id: VSI list ID generated as part of allocate resource
1774 * Helper function to create a new entry of VSI list ID to VSI mapping
1775 * using the given VSI list ID
/* Allocate and register a VSI-list-ID -> VSI-handle bitmap mapping on
 * sw->vsi_list_map_head. Returns the new map (NULL-check and "return
 * v_map" are on elided lines in this listing).
 */
 1777 static struct ice_vsi_list_map_info *
 1778 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 1781 struct ice_switch_info *sw = hw->switch_info;
 1782 struct ice_vsi_list_map_info *v_map;
 1785 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
 1789 v_map->vsi_list_id = vsi_list_id;
 /* record every member VSI handle in the map's bitmap */
 1791 for (i = 0; i < num_vsi; i++)
 1792 set_bit(vsi_handle_arr[i], v_map->vsi_map);
 1794 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
1799 * ice_update_vsi_list_rule
1800 * @hw: pointer to the hardware structure
1801 * @vsi_handle_arr: array of VSI handles to form a VSI list
1802 * @num_vsi: number of VSI handles in the array
1803 * @vsi_list_id: VSI list ID generated as part of allocate resource
1804 * @remove: Boolean value to indicate if this is a remove action
1805 * @opc: switch rules population command type - pass in the command opcode
1806 * @lkup_type: lookup type of the filter
1808 * Call AQ command to add a new switch rule or update existing switch rule
1809 * using the given VSI list ID
/* Add VSIs to / remove VSIs from a FW VSI list (or prune list for VLAN
 * lookups) identified by @vsi_list_id, via a single sw-rules AQ call.
 * NOTE(review): local declarations (s_rule, rule_type, s_rule_size, i,
 * status), the num_vsi==0 guard, the "else return -EINVAL" for unknown
 * lookup types and the final return are on elided lines.
 */
 1812 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 1813 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
 1814 enum ice_sw_lkup_type lkup_type)
 1816 struct ice_aqc_sw_rules_elem *s_rule;
 /* pick SET/CLEAR vs PRUNE SET/CLEAR based on lookup type */
 1825 if (lkup_type == ICE_SW_LKUP_MAC ||
 1826 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
 1827 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
 1828 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 1829 lkup_type == ICE_SW_LKUP_PROMISC ||
 1830 lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
 1831 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
 1832 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
 1833 else if (lkup_type == ICE_SW_LKUP_VLAN)
 1834 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
 1835 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
 1839 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
 1840 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
 1843 for (i = 0; i < num_vsi; i++) {
 1844 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
 1848 /* AQ call requires hw_vsi_id(s) */
 1849 s_rule->pdata.vsi_list.vsi[i] =
 1850 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
 1853 s_rule->type = cpu_to_le16(rule_type);
 1854 s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
 1855 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
 1857 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
 1860 devm_kfree(ice_hw_to_dev(hw), s_rule);
1865 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1866 * @hw: pointer to the HW struct
1867 * @vsi_handle_arr: array of VSI handles to form a VSI list
1868 * @num_vsi: number of VSI handles in the array
1869 * @vsi_list_id: stores the ID of the VSI list to be created
1870 * @lkup_type: switch rule filter's lookup type
/* Allocate a VSI list resource from FW, then populate it with the
 * given VSI handles via an add-sw-rules call. The status check after
 * the allocation is on an elided line.
 */
 1873 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 1874 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
 1878 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
 1879 ice_aqc_opc_alloc_res);
 1883 /* Update the newly created VSI list to include the specified VSIs */
 1884 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
 1885 *vsi_list_id, false,
 1886 ice_aqc_opc_add_sw_rules, lkup_type);
1890 * ice_create_pkt_fwd_rule
1891 * @hw: pointer to the hardware structure
1892 * @f_entry: entry containing packet forwarding information
1894 * Create switch rule with given filter information and add an entry
1895 * to the corresponding filter management list to track this switch rule
/* Program a new packet-forwarding switch rule in HW and add a matching
 * management entry to the recipe's filt_rules list so it can be
 * tracked and later removed.
 * NOTE(review): allocation-failure checks, the AQ-status check before
 * freeing fm_entry, and "return status" are on elided lines.
 */
 1899 ice_create_pkt_fwd_rule(struct ice_hw *hw,
 1900 struct ice_fltr_list_entry *f_entry)
 1902 struct ice_fltr_mgmt_list_entry *fm_entry;
 1903 struct ice_aqc_sw_rules_elem *s_rule;
 1904 enum ice_sw_lkup_type l_type;
 1905 struct ice_sw_recipe *recp;
 1908 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
 1909 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
 1912 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
 1916 goto ice_create_pkt_fwd_rule_exit;
 1919 fm_entry->fltr_info = f_entry->fltr_info;
 1921 /* Initialize all the fields for the management entry */
 1922 fm_entry->vsi_count = 1;
 1923 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
 1924 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
 1925 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
 1927 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
 1928 ice_aqc_opc_add_sw_rules);
 1930 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
 1931 ice_aqc_opc_add_sw_rules, NULL);
 /* presumably the failure path of the AQ call — elided guard line */
 1933 devm_kfree(ice_hw_to_dev(hw), fm_entry);
 1934 goto ice_create_pkt_fwd_rule_exit;
 /* HW returns the assigned rule ID; mirror it in both entries */
 1937 f_entry->fltr_info.fltr_rule_id =
 1938 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
 1939 fm_entry->fltr_info.fltr_rule_id =
 1940 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
 1942 /* The book keeping entries will get removed when base driver
 1943 * calls remove filter AQ command
 1945 l_type = fm_entry->fltr_info.lkup_type;
 1946 recp = &hw->switch_info->recp_list[l_type];
 1947 list_add(&fm_entry->list_entry, &recp->filt_rules);
 1949 ice_create_pkt_fwd_rule_exit:
 1950 devm_kfree(ice_hw_to_dev(hw), s_rule);
1955 * ice_update_pkt_fwd_rule
1956 * @hw: pointer to the hardware structure
1957 * @f_info: filter information for switch rule
1959 * Call AQ command to update a previously created switch rule with a
/* Rebuild an existing switch rule (identified by f_info->fltr_rule_id)
 * and submit it with the update opcode. Allocation NULL-check and the
 * final "return status" are on elided lines.
 */
 1963 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
 1965 struct ice_aqc_sw_rules_elem *s_rule;
 1968 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
 1969 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
 1973 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
 /* target the rule previously created in HW */
 1975 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
 1977 /* Update switch rule with new rule set to forward VSI list */
 1978 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
 1979 ice_aqc_opc_update_sw_rules, NULL);
 1981 devm_kfree(ice_hw_to_dev(hw), s_rule);
1986 * ice_update_sw_rule_bridge_mode
1987 * @hw: pointer to the HW struct
1989 * Updates unicast switch filter rules based on VEB/VEPA mode
/* Walk the MAC-lookup filter list under its lock and re-program every
 * unicast Tx forwarding rule so its lb_en/lan_en flags reflect the
 * current VEB/VEPA bridge mode.
 * NOTE(review): the status declaration, the "break on error" after the
 * update call and the final return are on elided lines.
 */
 1991 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
 1993 struct ice_switch_info *sw = hw->switch_info;
 1994 struct ice_fltr_mgmt_list_entry *fm_entry;
 1995 struct list_head *rule_head;
 1996 struct mutex *rule_lock; /* Lock to protect filter rule list */
 1999 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
 2000 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
 2002 mutex_lock(rule_lock);
 2003 list_for_each_entry(fm_entry, rule_head, list_entry) {
 2004 struct ice_fltr_info *fi = &fm_entry->fltr_info;
 2005 u8 *addr = fi->l_data.mac.mac_addr;
 2007 /* Update unicast Tx rules to reflect the selected
 2010 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
 2011 (fi->fltr_act == ICE_FWD_TO_VSI ||
 2012 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
 2013 fi->fltr_act == ICE_FWD_TO_Q ||
 2014 fi->fltr_act == ICE_FWD_TO_QGRP)) {
 2015 status = ice_update_pkt_fwd_rule(hw, fi);
 2021 mutex_unlock(rule_lock);
2027 * ice_add_update_vsi_list
2028 * @hw: pointer to the hardware structure
2029 * @m_entry: pointer to current filter management list entry
2030 * @cur_fltr: filter information from the book keeping entry
2031 * @new_fltr: filter information with the new VSI to be added
2033 * Call AQ command to add or update previously created VSI list with new VSI.
2035 * Helper function to do book keeping associated with adding filter information
2036 * The algorithm to do the book keeping is described below :
2037 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2038 * if only one VSI has been added till now
2039 * Allocate a new VSI list and add two VSIs
2040 * to this list using switch rule command
2041 * Update the previously created switch rule with the
2042 * newly created VSI list ID
2043 * if a VSI list was previously created
2044 * Add the new VSI to the previously created VSI list set
2045 * using the update switch rule command
/* Subscribe an additional VSI to an existing filter: either promote a
 * single-VSI rule into a two-member VSI list (creating the list and
 * rewriting the rule to FWD_TO_VSI_LIST), or add the VSI to the
 * already-existing list.
 * NOTE(review): error returns (-EOPNOTSUPP/-EEXIST/-EIO/etc.), status
 * guards and closing braces are on elided lines in this listing.
 */
 2048 ice_add_update_vsi_list(struct ice_hw *hw,
 2049 struct ice_fltr_mgmt_list_entry *m_entry,
 2050 struct ice_fltr_info *cur_fltr,
 2051 struct ice_fltr_info *new_fltr)
 2053 u16 vsi_list_id = 0;
 /* queue/queue-group destinations can't share a VSI list */
 2056 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
 2057 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
 2060 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
 2061 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
 2062 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
 2063 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
 2066 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
 2067 /* Only one entry existed in the mapping and it was not already
 2068 * a part of a VSI list. So, create a VSI list with the old and
 2071 struct ice_fltr_info tmp_fltr;
 2072 u16 vsi_handle_arr[2];
 2074 /* A rule already exists with the new VSI being added */
 2075 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
 2078 vsi_handle_arr[0] = cur_fltr->vsi_handle;
 2079 vsi_handle_arr[1] = new_fltr->vsi_handle;
 2080 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
 2082 new_fltr->lkup_type);
 2086 tmp_fltr = *new_fltr;
 2087 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
 2088 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
 2089 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
 2090 /* Update the previous switch rule of "MAC forward to VSI" to
 2091 * "MAC fwd to VSI list"
 2093 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 /* keep bookkeeping in sync with the rewritten HW rule */
 2097 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
 2098 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
 2099 m_entry->vsi_list_info =
 2100 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
 2103 if (!m_entry->vsi_list_info)
 2106 /* If this entry was large action then the large action needs
 2107 * to be updated to point to FWD to VSI list
 2109 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
 2111 ice_add_marker_act(hw, m_entry,
 2112 m_entry->sw_marker_id,
 2113 m_entry->lg_act_idx);
 2115 u16 vsi_handle = new_fltr->vsi_handle;
 2116 enum ice_adminq_opc opcode;
 2118 if (!m_entry->vsi_list_info)
 2121 /* A rule already exists with the new VSI being added */
 2122 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
 2125 /* Update the previously created VSI list set with
 2126 * the new VSI ID passed in
 2128 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
 2129 opcode = ice_aqc_opc_update_sw_rules;
 2131 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
 2132 vsi_list_id, false, opcode,
 2133 new_fltr->lkup_type);
 2134 /* update VSI list mapping info with new VSI ID */
 2136 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
 2139 m_entry->vsi_count++;
2144 * ice_find_rule_entry - Search a rule entry
2145 * @hw: pointer to the hardware structure
2146 * @recp_id: lookup type for which the specified rule needs to be searched
2147 * @f_info: rule information
2149 * Helper function to search for a given rule entry
2150 * Returns pointer to entry storing the rule if found
/* Linear search of recp_list[recp_id].filt_rules for an entry whose
 * lookup data and flag match @f_info. Caller must hold the recipe's
 * filt_rule_lock. The "ret = list_itr; break;" and "return ret" lines
 * are elided in this listing.
 */
 2152 static struct ice_fltr_mgmt_list_entry *
 2153 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
 2155 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
 2156 struct ice_switch_info *sw = hw->switch_info;
 2157 struct list_head *list_head;
 2159 list_head = &sw->recp_list[recp_id].filt_rules;
 2160 list_for_each_entry(list_itr, list_head, list_entry) {
 /* match on raw lookup data plus Rx/Tx flag */
 2161 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
 2162 sizeof(f_info->l_data)) &&
 2163 f_info->flag == list_itr->fltr_info.flag) {
2172 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2173 * @hw: pointer to the hardware structure
2174 * @recp_id: lookup type for which VSI lists needs to be searched
2175 * @vsi_handle: VSI handle to be found in VSI list
2176 * @vsi_list_id: VSI list ID found containing vsi_handle
2178 * Helper function to search a VSI list with single entry containing given VSI
2179 * handle element. This can be extended further to search VSI list with more
2180 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* Search the recipe's filter list for a single-VSI list containing
 * @vsi_handle; on a hit *vsi_list_id is set to that list's ID. The
 * "return map_info"/break lines are elided in this listing.
 */
 2182 static struct ice_vsi_list_map_info *
 2183 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
 2186 struct ice_vsi_list_map_info *map_info = NULL;
 2187 struct ice_switch_info *sw = hw->switch_info;
 2188 struct ice_fltr_mgmt_list_entry *list_itr;
 2189 struct list_head *list_head;
 2191 list_head = &sw->recp_list[recp_id].filt_rules;
 2192 list_for_each_entry(list_itr, list_head, list_entry) {
 /* only single-member lists qualify (see kdoc above) */
 2193 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
 2194 map_info = list_itr->vsi_list_info;
 2195 if (test_bit(vsi_handle, map_info->vsi_map)) {
 2196 *vsi_list_id = map_info->vsi_list_id;
2205 * ice_add_rule_internal - add rule for a given lookup type
2206 * @hw: pointer to the hardware structure
2207 * @recp_id: lookup type (recipe ID) for which rule has to be added
2208 * @f_entry: structure containing MAC forwarding information
2210 * Adds or updates the rule lists for a given recipe
/* Add or extend a rule for @recp_id: under the recipe lock, either
 * create a fresh packet-forwarding rule (no existing match) or fold
 * the new VSI into the matching entry's VSI list.
 * NOTE(review): the status declaration, "return -EINVAL" after the
 * validity check and the final return are on elided lines.
 */
 2213 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
 2214 struct ice_fltr_list_entry *f_entry)
 2216 struct ice_switch_info *sw = hw->switch_info;
 2217 struct ice_fltr_info *new_fltr, *cur_fltr;
 2218 struct ice_fltr_mgmt_list_entry *m_entry;
 2219 struct mutex *rule_lock; /* Lock to protect filter rule list */
 2222 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
 2224 f_entry->fltr_info.fwd_id.hw_vsi_id =
 2225 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
 2227 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
 2229 mutex_lock(rule_lock);
 2230 new_fltr = &f_entry->fltr_info;
 /* source is the port for Rx rules, the HW VSI for Tx rules */
 2231 if (new_fltr->flag & ICE_FLTR_RX)
 2232 new_fltr->src = hw->port_info->lport;
 2233 else if (new_fltr->flag & ICE_FLTR_TX)
 2234 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
 2236 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
 /* presumably "if (!m_entry)" — elided guard line */
 2238 mutex_unlock(rule_lock);
 2239 return ice_create_pkt_fwd_rule(hw, f_entry);
 2242 cur_fltr = &m_entry->fltr_info;
 2243 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
 2244 mutex_unlock(rule_lock);
2250 * ice_remove_vsi_list_rule
2251 * @hw: pointer to the hardware structure
2252 * @vsi_list_id: VSI list ID generated as part of allocate resource
2253 * @lkup_type: switch rule filter lookup type
2255 * The VSI list should be emptied before this function is called to remove the
/* Free an (assumed empty) VSI list resource back to FW. The s_rule
 * buffer is built but — per the visible code — only the alloc/free
 * resource call is issued before it is freed; the NULL-check after
 * kzalloc and the return statement are on elided lines.
 */
 2259 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
 2260 enum ice_sw_lkup_type lkup_type)
 2262 struct ice_aqc_sw_rules_elem *s_rule;
 2266 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
 2267 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
 2271 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
 2272 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
 2274 /* Free the vsi_list resource that we allocated. It is assumed that the
 2275 * list is empty at this point.
 2277 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
 2278 ice_aqc_opc_free_res);
 2280 devm_kfree(ice_hw_to_dev(hw), s_rule);
2285 * ice_rem_update_vsi_list
2286 * @hw: pointer to the hardware structure
2287 * @vsi_handle: VSI handle of the VSI to remove
2288 * @fm_list: filter management entry for which the VSI list management needs to
2292 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2293 struct ice_fltr_mgmt_list_entry *fm_list)
2295 enum ice_sw_lkup_type lkup_type;
2299 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2300 fm_list->vsi_count == 0)
2303 /* A rule with the VSI being removed does not exist */
2304 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
2307 lkup_type = fm_list->fltr_info.lkup_type;
2308 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2309 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2310 ice_aqc_opc_update_sw_rules,
2315 fm_list->vsi_count--;
2316 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2318 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2319 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2320 struct ice_vsi_list_map_info *vsi_list_info =
2321 fm_list->vsi_list_info;
2324 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
2326 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2329 /* Make sure VSI list is empty before removing it below */
2330 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2332 ice_aqc_opc_update_sw_rules,
2337 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2338 tmp_fltr_info.fwd_id.hw_vsi_id =
2339 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2340 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2341 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2343 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2344 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2348 fm_list->fltr_info = tmp_fltr_info;
2351 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2352 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2353 struct ice_vsi_list_map_info *vsi_list_info =
2354 fm_list->vsi_list_info;
2356 /* Remove the VSI list since it is no longer used */
2357 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2359 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
2360 vsi_list_id, status);
2364 list_del(&vsi_list_info->list_entry);
2365 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
2366 fm_list->vsi_list_info = NULL;
2373 * ice_remove_rule_internal - Remove a filter rule of a given type
2374 * @hw: pointer to the hardware structure
2375 * @recp_id: recipe ID for which the rule needs to removed
2376 * @f_entry: rule entry containing filter information
2379 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2380 struct ice_fltr_list_entry *f_entry)
2382 struct ice_switch_info *sw = hw->switch_info;
2383 struct ice_fltr_mgmt_list_entry *list_elem;
2384 struct mutex *rule_lock; /* Lock to protect filter rule list */
2385 bool remove_rule = false;
2389 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2391 f_entry->fltr_info.fwd_id.hw_vsi_id =
2392 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2394 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2395 mutex_lock(rule_lock);
2396 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2402 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2404 } else if (!list_elem->vsi_list_info) {
2407 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2408 /* a ref_cnt > 1 indicates that the vsi_list is being
2409 * shared by multiple rules. Decrement the ref_cnt and
2410 * remove this rule, but do not modify the list, as it
2411 * is in-use by other rules.
2413 list_elem->vsi_list_info->ref_cnt--;
2416 /* a ref_cnt of 1 indicates the vsi_list is only used
2417 * by one rule. However, the original removal request is only
2418 * for a single VSI. Update the vsi_list first, and only
2419 * remove the rule if there are no further VSIs in this list.
2421 vsi_handle = f_entry->fltr_info.vsi_handle;
2422 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2425 /* if VSI count goes to zero after updating the VSI list */
2426 if (list_elem->vsi_count == 0)
2431 /* Remove the lookup rule */
2432 struct ice_aqc_sw_rules_elem *s_rule;
2434 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2435 ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
2442 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2443 ice_aqc_opc_remove_sw_rules);
2445 status = ice_aq_sw_rules(hw, s_rule,
2446 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2447 ice_aqc_opc_remove_sw_rules, NULL);
2449 /* Remove a book keeping from the list */
2450 devm_kfree(ice_hw_to_dev(hw), s_rule);
2455 list_del(&list_elem->list_entry);
2456 devm_kfree(ice_hw_to_dev(hw), list_elem);
2459 mutex_unlock(rule_lock);
2464 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
2465 * @hw: pointer to the hardware structure
2466 * @mac: MAC address to be checked (for MAC filter)
2467 * @vsi_handle: check MAC filter for this VSI
2469 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
2471 struct ice_fltr_mgmt_list_entry *entry;
2472 struct list_head *rule_head;
2473 struct ice_switch_info *sw;
2474 struct mutex *rule_lock; /* Lock to protect filter rule list */
2477 if (!ice_is_vsi_valid(hw, vsi_handle))
2480 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2481 sw = hw->switch_info;
2482 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2486 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2487 mutex_lock(rule_lock);
2488 list_for_each_entry(entry, rule_head, list_entry) {
2489 struct ice_fltr_info *f_info = &entry->fltr_info;
2490 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2492 if (is_zero_ether_addr(mac_addr))
2495 if (f_info->flag != ICE_FLTR_TX ||
2496 f_info->src_id != ICE_SRC_ID_VSI ||
2497 f_info->lkup_type != ICE_SW_LKUP_MAC ||
2498 f_info->fltr_act != ICE_FWD_TO_VSI ||
2499 hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2502 if (ether_addr_equal(mac, mac_addr)) {
2503 mutex_unlock(rule_lock);
2507 mutex_unlock(rule_lock);
2512 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
2513 * @hw: pointer to the hardware structure
2515 * @vsi_handle: check MAC filter for this VSI
2517 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
2519 struct ice_fltr_mgmt_list_entry *entry;
2520 struct list_head *rule_head;
2521 struct ice_switch_info *sw;
2522 struct mutex *rule_lock; /* Lock to protect filter rule list */
2525 if (vlan_id > ICE_MAX_VLAN_ID)
2528 if (!ice_is_vsi_valid(hw, vsi_handle))
2531 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2532 sw = hw->switch_info;
2533 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
2537 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2538 mutex_lock(rule_lock);
2539 list_for_each_entry(entry, rule_head, list_entry) {
2540 struct ice_fltr_info *f_info = &entry->fltr_info;
2541 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
2542 struct ice_vsi_list_map_info *map_info;
2544 if (entry_vlan_id > ICE_MAX_VLAN_ID)
2547 if (f_info->flag != ICE_FLTR_TX ||
2548 f_info->src_id != ICE_SRC_ID_VSI ||
2549 f_info->lkup_type != ICE_SW_LKUP_VLAN)
2552 /* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
2553 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
2554 f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
2557 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
2558 if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2560 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2561 /* If filter_action is FWD_TO_VSI_LIST, make sure
2562 * that VSI being checked is part of VSI list
2564 if (entry->vsi_count == 1 &&
2565 entry->vsi_list_info) {
2566 map_info = entry->vsi_list_info;
2567 if (!test_bit(vsi_handle, map_info->vsi_map))
2572 if (vlan_id == entry_vlan_id) {
2573 mutex_unlock(rule_lock);
2577 mutex_unlock(rule_lock);
2583 * ice_add_mac - Add a MAC address based filter rule
2584 * @hw: pointer to the hardware structure
2585 * @m_list: list of MAC addresses and forwarding information
2587 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2588 * multiple unicast addresses, the function assumes that all the
2589 * addresses are unique in a given add_mac call. It doesn't
2590 * check for duplicates in this case, removing duplicates from a given
2591 * list should be taken care of in the caller of this function.
2593 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
2595 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2596 struct ice_fltr_list_entry *m_list_itr;
2597 struct list_head *rule_head;
2598 u16 total_elem_left, s_rule_size;
2599 struct ice_switch_info *sw;
2600 struct mutex *rule_lock; /* Lock to protect filter rule list */
2601 u16 num_unicast = 0;
2609 sw = hw->switch_info;
2610 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2611 list_for_each_entry(m_list_itr, m_list, list_entry) {
2612 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2616 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2617 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2618 if (!ice_is_vsi_valid(hw, vsi_handle))
2620 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2621 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2622 /* update the src in case it is VSI num */
2623 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2625 m_list_itr->fltr_info.src = hw_vsi_id;
2626 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2627 is_zero_ether_addr(add))
2629 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2630 /* Don't overwrite the unicast address */
2631 mutex_lock(rule_lock);
2632 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2633 &m_list_itr->fltr_info)) {
2634 mutex_unlock(rule_lock);
2637 mutex_unlock(rule_lock);
2639 } else if (is_multicast_ether_addr(add) ||
2640 (is_unicast_ether_addr(add) && hw->ucast_shared)) {
2641 m_list_itr->status =
2642 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2644 if (m_list_itr->status)
2645 return m_list_itr->status;
2649 mutex_lock(rule_lock);
2650 /* Exit if no suitable entries were found for adding bulk switch rule */
2653 goto ice_add_mac_exit;
2656 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2658 /* Allocate switch rule buffer for the bulk update for unicast */
2659 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2660 s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
2664 goto ice_add_mac_exit;
2668 list_for_each_entry(m_list_itr, m_list, list_entry) {
2669 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2670 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2672 if (is_unicast_ether_addr(mac_addr)) {
2673 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2674 ice_aqc_opc_add_sw_rules);
2675 r_iter = (struct ice_aqc_sw_rules_elem *)
2676 ((u8 *)r_iter + s_rule_size);
2680 /* Call AQ bulk switch rule update for all unicast addresses */
2682 /* Call AQ switch rule in AQ_MAX chunk */
2683 for (total_elem_left = num_unicast; total_elem_left > 0;
2684 total_elem_left -= elem_sent) {
2685 struct ice_aqc_sw_rules_elem *entry = r_iter;
2687 elem_sent = min_t(u8, total_elem_left,
2688 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
2689 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2690 elem_sent, ice_aqc_opc_add_sw_rules,
2693 goto ice_add_mac_exit;
2694 r_iter = (struct ice_aqc_sw_rules_elem *)
2695 ((u8 *)r_iter + (elem_sent * s_rule_size));
2698 /* Fill up rule ID based on the value returned from FW */
2700 list_for_each_entry(m_list_itr, m_list, list_entry) {
2701 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2702 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2703 struct ice_fltr_mgmt_list_entry *fm_entry;
2705 if (is_unicast_ether_addr(mac_addr)) {
2706 f_info->fltr_rule_id =
2707 le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
2708 f_info->fltr_act = ICE_FWD_TO_VSI;
2709 /* Create an entry to track this MAC address */
2710 fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
2711 sizeof(*fm_entry), GFP_KERNEL);
2714 goto ice_add_mac_exit;
2716 fm_entry->fltr_info = *f_info;
2717 fm_entry->vsi_count = 1;
2718 /* The book keeping entries will get removed when
2719 * base driver calls remove filter AQ command
2722 list_add(&fm_entry->list_entry, rule_head);
2723 r_iter = (struct ice_aqc_sw_rules_elem *)
2724 ((u8 *)r_iter + s_rule_size);
2729 mutex_unlock(rule_lock);
2731 devm_kfree(ice_hw_to_dev(hw), s_rule);
2736 * ice_add_vlan_internal - Add one VLAN based filter rule
2737 * @hw: pointer to the hardware structure
2738 * @f_entry: filter entry containing one VLAN information
2741 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2743 struct ice_switch_info *sw = hw->switch_info;
2744 struct ice_fltr_mgmt_list_entry *v_list_itr;
2745 struct ice_fltr_info *new_fltr, *cur_fltr;
2746 enum ice_sw_lkup_type lkup_type;
2747 u16 vsi_list_id = 0, vsi_handle;
2748 struct mutex *rule_lock; /* Lock to protect filter rule list */
2751 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2754 f_entry->fltr_info.fwd_id.hw_vsi_id =
2755 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2756 new_fltr = &f_entry->fltr_info;
2758 /* VLAN ID should only be 12 bits */
2759 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2762 if (new_fltr->src_id != ICE_SRC_ID_VSI)
2765 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2766 lkup_type = new_fltr->lkup_type;
2767 vsi_handle = new_fltr->vsi_handle;
2768 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2769 mutex_lock(rule_lock);
2770 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
2772 struct ice_vsi_list_map_info *map_info = NULL;
2774 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2775 /* All VLAN pruning rules use a VSI list. Check if
2776 * there is already a VSI list containing VSI that we
2777 * want to add. If found, use the same vsi_list_id for
2778 * this new VLAN rule or else create a new list.
2780 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2784 status = ice_create_vsi_list_rule(hw,
2792 /* Convert the action to forwarding to a VSI list. */
2793 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2794 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2797 status = ice_create_pkt_fwd_rule(hw, f_entry);
2799 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2805 /* reuse VSI list for new rule and increment ref_cnt */
2807 v_list_itr->vsi_list_info = map_info;
2808 map_info->ref_cnt++;
2810 v_list_itr->vsi_list_info =
2811 ice_create_vsi_list_map(hw, &vsi_handle,
2815 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2816 /* Update existing VSI list to add new VSI ID only if it used
2819 cur_fltr = &v_list_itr->fltr_info;
2820 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2823 /* If VLAN rule exists and VSI list being used by this rule is
2824 * referenced by more than 1 VLAN rule. Then create a new VSI
2825 * list appending previous VSI with new VSI and update existing
2826 * VLAN rule to point to new VSI list ID
2828 struct ice_fltr_info tmp_fltr;
2829 u16 vsi_handle_arr[2];
2832 /* Current implementation only supports reusing VSI list with
2833 * one VSI count. We should never hit below condition
2835 if (v_list_itr->vsi_count > 1 &&
2836 v_list_itr->vsi_list_info->ref_cnt > 1) {
2837 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
2843 find_first_bit(v_list_itr->vsi_list_info->vsi_map,
2846 /* A rule already exists with the new VSI being added */
2847 if (cur_handle == vsi_handle) {
2852 vsi_handle_arr[0] = cur_handle;
2853 vsi_handle_arr[1] = vsi_handle;
2854 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2855 &vsi_list_id, lkup_type);
2859 tmp_fltr = v_list_itr->fltr_info;
2860 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
2861 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2862 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2863 /* Update the previous switch rule to a new VSI list which
2864 * includes current VSI that is requested
2866 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2870 /* before overriding VSI list map info. decrement ref_cnt of
2873 v_list_itr->vsi_list_info->ref_cnt--;
2875 /* now update to newly created list */
2876 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
2877 v_list_itr->vsi_list_info =
2878 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2880 v_list_itr->vsi_count++;
2884 mutex_unlock(rule_lock);
2889 * ice_add_vlan - Add VLAN based filter rule
2890 * @hw: pointer to the hardware structure
2891 * @v_list: list of VLAN entries and forwarding information
2893 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
2895 struct ice_fltr_list_entry *v_list_itr;
2900 list_for_each_entry(v_list_itr, v_list, list_entry) {
2901 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2903 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2904 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2905 if (v_list_itr->status)
2906 return v_list_itr->status;
2912 * ice_add_eth_mac - Add ethertype and MAC based filter rule
2913 * @hw: pointer to the hardware structure
2914 * @em_list: list of ether type MAC filter, MAC is optional
2916 * This function requires the caller to populate the entries in
2917 * the filter list with the necessary fields (including flags to
2918 * indicate Tx or Rx rules).
2920 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2922 struct ice_fltr_list_entry *em_list_itr;
2924 if (!em_list || !hw)
2927 list_for_each_entry(em_list_itr, em_list, list_entry) {
2928 enum ice_sw_lkup_type l_type =
2929 em_list_itr->fltr_info.lkup_type;
2931 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2932 l_type != ICE_SW_LKUP_ETHERTYPE)
2935 em_list_itr->status = ice_add_rule_internal(hw, l_type,
2937 if (em_list_itr->status)
2938 return em_list_itr->status;
2944 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
2945 * @hw: pointer to the hardware structure
2946 * @em_list: list of ethertype or ethertype MAC entries
2948 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2950 struct ice_fltr_list_entry *em_list_itr, *tmp;
2952 if (!em_list || !hw)
2955 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2956 enum ice_sw_lkup_type l_type =
2957 em_list_itr->fltr_info.lkup_type;
2959 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2960 l_type != ICE_SW_LKUP_ETHERTYPE)
2963 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2965 if (em_list_itr->status)
2966 return em_list_itr->status;
2972 * ice_rem_sw_rule_info
2973 * @hw: pointer to the hardware structure
2974 * @rule_head: pointer to the switch list structure that we want to delete
2977 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2979 if (!list_empty(rule_head)) {
2980 struct ice_fltr_mgmt_list_entry *entry;
2981 struct ice_fltr_mgmt_list_entry *tmp;
2983 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2984 list_del(&entry->list_entry);
2985 devm_kfree(ice_hw_to_dev(hw), entry);
2991 * ice_rem_adv_rule_info
2992 * @hw: pointer to the hardware structure
2993 * @rule_head: pointer to the switch list structure that we want to delete
2996 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2998 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
2999 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3001 if (list_empty(rule_head))
3004 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3005 list_del(&lst_itr->list_entry);
3006 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3007 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3012 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3013 * @hw: pointer to the hardware structure
3014 * @vsi_handle: VSI handle to set as default
3015 * @set: true to add the above mentioned switch rule, false to remove it
3016 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3018 * add filter rule to set/unset given VSI as default VSI for the switch
3019 * (represented by swid)
3021 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3023 struct ice_aqc_sw_rules_elem *s_rule;
3024 struct ice_fltr_info f_info;
3025 enum ice_adminq_opc opcode;
3030 if (!ice_is_vsi_valid(hw, vsi_handle))
3032 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3034 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3035 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3037 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3041 memset(&f_info, 0, sizeof(f_info));
3043 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3044 f_info.flag = direction;
3045 f_info.fltr_act = ICE_FWD_TO_VSI;
3046 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3048 if (f_info.flag & ICE_FLTR_RX) {
3049 f_info.src = hw->port_info->lport;
3050 f_info.src_id = ICE_SRC_ID_LPORT;
3052 f_info.fltr_rule_id =
3053 hw->port_info->dflt_rx_vsi_rule_id;
3054 } else if (f_info.flag & ICE_FLTR_TX) {
3055 f_info.src_id = ICE_SRC_ID_VSI;
3056 f_info.src = hw_vsi_id;
3058 f_info.fltr_rule_id =
3059 hw->port_info->dflt_tx_vsi_rule_id;
3063 opcode = ice_aqc_opc_add_sw_rules;
3065 opcode = ice_aqc_opc_remove_sw_rules;
3067 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3069 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3070 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3073 u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
3075 if (f_info.flag & ICE_FLTR_TX) {
3076 hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3077 hw->port_info->dflt_tx_vsi_rule_id = index;
3078 } else if (f_info.flag & ICE_FLTR_RX) {
3079 hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3080 hw->port_info->dflt_rx_vsi_rule_id = index;
3083 if (f_info.flag & ICE_FLTR_TX) {
3084 hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3085 hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3086 } else if (f_info.flag & ICE_FLTR_RX) {
3087 hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3088 hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3093 devm_kfree(ice_hw_to_dev(hw), s_rule);
3098 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3099 * @hw: pointer to the hardware structure
3100 * @recp_id: lookup type for which the specified rule needs to be searched
3101 * @f_info: rule information
3103 * Helper function to search for a unicast rule entry - this is to be used
3104 * to remove unicast MAC filter that is not shared with other VSIs on the
3107 * Returns pointer to entry storing the rule if found
3109 static struct ice_fltr_mgmt_list_entry *
3110 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3111 struct ice_fltr_info *f_info)
3113 struct ice_switch_info *sw = hw->switch_info;
3114 struct ice_fltr_mgmt_list_entry *list_itr;
3115 struct list_head *list_head;
3117 list_head = &sw->recp_list[recp_id].filt_rules;
3118 list_for_each_entry(list_itr, list_head, list_entry) {
3119 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3120 sizeof(f_info->l_data)) &&
3121 f_info->fwd_id.hw_vsi_id ==
3122 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3123 f_info->flag == list_itr->fltr_info.flag)
3130 * ice_remove_mac - remove a MAC address based filter rule
3131 * @hw: pointer to the hardware structure
3132 * @m_list: list of MAC addresses and forwarding information
3134 * This function removes either a MAC filter rule or a specific VSI from a
3135 * VSI list for a multicast MAC address.
3137 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3138 * be aware that this call will only work if all the entries passed into m_list
3139 * were added previously. It will not attempt to do a partial remove of entries
3142 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3144 struct ice_fltr_list_entry *list_itr, *tmp;
3145 struct mutex *rule_lock; /* Lock to protect filter rule list */
3150 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3151 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3152 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3153 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3156 if (l_type != ICE_SW_LKUP_MAC)
3159 vsi_handle = list_itr->fltr_info.vsi_handle;
3160 if (!ice_is_vsi_valid(hw, vsi_handle))
3163 list_itr->fltr_info.fwd_id.hw_vsi_id =
3164 ice_get_hw_vsi_num(hw, vsi_handle);
3165 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3166 /* Don't remove the unicast address that belongs to
3167 * another VSI on the switch, since it is not being
3170 mutex_lock(rule_lock);
3171 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3172 &list_itr->fltr_info)) {
3173 mutex_unlock(rule_lock);
3176 mutex_unlock(rule_lock);
3178 list_itr->status = ice_remove_rule_internal(hw,
3181 if (list_itr->status)
3182 return list_itr->status;
3188 * ice_remove_vlan - Remove VLAN based filter rule
3189 * @hw: pointer to the hardware structure
3190 * @v_list: list of VLAN entries and forwarding information
3192 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3194 struct ice_fltr_list_entry *v_list_itr, *tmp;
3199 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3200 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3202 if (l_type != ICE_SW_LKUP_VLAN)
3204 v_list_itr->status = ice_remove_rule_internal(hw,
3207 if (v_list_itr->status)
3208 return v_list_itr->status;
3214 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3215 * @fm_entry: filter entry to inspect
3216 * @vsi_handle: VSI handle to compare with filter info
3219 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3221 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3222 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3223 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3224 fm_entry->vsi_list_info &&
3225 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3229 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3230 * @hw: pointer to the hardware structure
3231 * @vsi_handle: VSI handle to remove filters from
3232 * @vsi_list_head: pointer to the list to add entry to
3233 * @fi: pointer to fltr_info of filter entry to copy & add
3235 * Helper function, used when creating a list of filters to remove from
3236 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3237 * original filter entry, with the exception of fltr_info.fltr_act and
3238 * fltr_info.fwd_id fields. These are set such that later logic can
3239 * extract which VSI to remove the fltr from, and pass on that information.
3242 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3243 struct list_head *vsi_list_head,
3244 struct ice_fltr_info *fi)
3246 struct ice_fltr_list_entry *tmp;
3248 /* this memory is freed up in the caller function
3249 * once filters for this VSI are removed
3251 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
3255 tmp->fltr_info = *fi;
3257 /* Overwrite these fields to indicate which VSI to remove filter from,
3258 * so find and remove logic can extract the information from the
3259 * list entries. Note that original entries will still have proper
3262 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3263 tmp->fltr_info.vsi_handle = vsi_handle;
3264 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3266 list_add(&tmp->list_entry, vsi_list_head);
3272 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3273 * @hw: pointer to the hardware structure
3274 * @vsi_handle: VSI handle to remove filters from
3275 * @lkup_list_head: pointer to the list that has certain lookup type filters
3276 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3278 * Locates all filters in lkup_list_head that are used by the given VSI,
3279 * and adds COPIES of those entries to vsi_list_head (intended to be used
3280 * to remove the listed filters).
3281 * Note that this means all entries in vsi_list_head must be explicitly
3282 * deallocated by the caller when done with list.
3285 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3286 struct list_head *lkup_list_head,
3287 struct list_head *vsi_list_head)
3289 struct ice_fltr_mgmt_list_entry *fm_entry;
3292 /* check to make sure VSI ID is valid and within boundary */
3293 if (!ice_is_vsi_valid(hw, vsi_handle))
3296 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
3297 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
3300 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3302 &fm_entry->fltr_info);
3310 * ice_determine_promisc_mask
3311 * @fi: filter info to parse
3313 * Helper function to determine which ICE_PROMISC_ mask corresponds
3314 * to given filter into.
3316 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3318 u16 vid = fi->l_data.mac_vlan.vlan_id;
3319 u8 *macaddr = fi->l_data.mac.mac_addr;
3320 bool is_tx_fltr = false;
3321 u8 promisc_mask = 0;
3323 if (fi->flag == ICE_FLTR_TX)
3326 if (is_broadcast_ether_addr(macaddr))
3327 promisc_mask |= is_tx_fltr ?
3328 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3329 else if (is_multicast_ether_addr(macaddr))
3330 promisc_mask |= is_tx_fltr ?
3331 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3332 else if (is_unicast_ether_addr(macaddr))
3333 promisc_mask |= is_tx_fltr ?
3334 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3336 promisc_mask |= is_tx_fltr ?
3337 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3339 return promisc_mask;
3343 * ice_remove_promisc - Remove promisc based filter rules
3344 * @hw: pointer to the hardware structure
3345 * @recp_id: recipe ID for which the rule needs to removed
3346 * @v_list: list of promisc entries
3349 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
3351 struct ice_fltr_list_entry *v_list_itr, *tmp;
3353 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3354 v_list_itr->status =
3355 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3356 if (v_list_itr->status)
3357 return v_list_itr->status;
3363 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3364 * @hw: pointer to the hardware structure
3365 * @vsi_handle: VSI handle to clear mode
3366 * @promisc_mask: mask of promiscuous config bits to clear
3367 * @vid: VLAN ID to clear VLAN promiscuous
3370 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3373 struct ice_switch_info *sw = hw->switch_info;
3374 struct ice_fltr_list_entry *fm_entry, *tmp;
3375 struct list_head remove_list_head;
3376 struct ice_fltr_mgmt_list_entry *itr;
3377 struct list_head *rule_head;
3378 struct mutex *rule_lock; /* Lock to protect filter rule list */
3382 if (!ice_is_vsi_valid(hw, vsi_handle))
3385 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3386 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3388 recipe_id = ICE_SW_LKUP_PROMISC;
3390 rule_head = &sw->recp_list[recipe_id].filt_rules;
3391 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3393 INIT_LIST_HEAD(&remove_list_head);
3395 mutex_lock(rule_lock);
3396 list_for_each_entry(itr, rule_head, list_entry) {
3397 struct ice_fltr_info *fltr_info;
3398 u8 fltr_promisc_mask = 0;
3400 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3402 fltr_info = &itr->fltr_info;
3404 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3405 vid != fltr_info->l_data.mac_vlan.vlan_id)
3408 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3410 /* Skip if filter is not completely specified by given mask */
3411 if (fltr_promisc_mask & ~promisc_mask)
3414 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3418 mutex_unlock(rule_lock);
3419 goto free_fltr_list;
3422 mutex_unlock(rule_lock);
3424 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3427 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3428 list_del(&fm_entry->list_entry);
3429 devm_kfree(ice_hw_to_dev(hw), fm_entry);
3436 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3437 * @hw: pointer to the hardware structure
3438 * @vsi_handle: VSI handle to configure
3439 * @promisc_mask: mask of promiscuous config bits
3440 * @vid: VLAN ID to set VLAN promiscuous
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
	/* Local tags selecting the destination-address flavor used below */
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;

	/* Reject requests for unknown VSI handles before touching HW state */
	if (!ice_is_vsi_valid(hw, vsi_handle))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	/* VLAN promiscuous filters carry the VLAN ID in the lookup data and
	 * use the PROMISC_VLAN recipe; otherwise plain PROMISC is used.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 */
	while (promisc_mask) {
		/* Consume exactly one ucast/mcast/bcast RX or TX bit per
		 * iteration; each bit produces one switch rule below.
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */

		/* Need to reset this to zero for all iterations */
			/* TX filters are sourced from the VSI itself */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
			/* RX filters are sourced from the physical port */
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;

		/* Every rule forwards matching traffic to this VSI */
		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
			goto set_promisc_exit;
3548 * ice_set_vlan_vsi_promisc
3549 * @hw: pointer to the hardware structure
3550 * @vsi_handle: VSI handle to configure
3551 * @promisc_mask: mask of promiscuous config bits
3552 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3554 * Configure VSI with all associated VLANs to given promiscuous mode(s)
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot this VSI's VLAN filters into a private list under the
	 * lock; promisc set/clear below runs on the copy, unlocked.
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
	mutex_unlock(vlan_lock);
		goto free_fltr_list;

	/* Apply or remove the requested promisc mode for every VLAN ID
	 * currently configured on the VSI.
	 */
	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);

	/* Always release the temporary list entries, even on error */
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
3599 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3600 * @hw: pointer to the hardware structure
3601 * @vsi_handle: VSI handle to remove filters from
3602 * @lkup: switch rule filter lookup type
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	/* Collect this VSI's rules for the given lookup type into a private
	 * list under the lock; removal below operates on the copy.
	 */
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
	mutex_unlock(rule_lock);
		goto free_fltr_list;

	/* Dispatch to the remover that matches the lookup type */
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
	/* Remaining lookup types have no dedicated remover; log and fall
	 * through to the cleanup of the temporary list.
	 */
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);

	/* Free the temporary list entries in all cases */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
3655 * ice_remove_vsi_fltr - Remove all filters for a VSI
3656 * @hw: pointer to the hardware structure
3657 * @vsi_handle: VSI handle to remove filters from
3659 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3661 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3662 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3663 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3664 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3665 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3666 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3667 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3668 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3672 * ice_alloc_res_cntr - allocating resource counter
3673 * @hw: pointer to the hardware structure
3674 * @type: type of resource
3675 * @alloc_shared: if set it is shared else dedicated
3676 * @num_items: number of entries requested for FD resource type
3677 * @counter_id: counter index returned by AQ call
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;

	/* Allocate resource */
	/* NOTE(review): buffer is sized for a single element while num_elems
	 * below is set to num_items; callers appear to pass num_items == 1 —
	 * confirm before allowing larger counts, since the AQ response is
	 * read from elem[0] only.
	 */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);

	buf->num_elems = cpu_to_le16(num_items);
	/* Encode resource type and shared/dedicated flag in one field */
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);

	/* On success FW returns the allocated counter index in elem[0] */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
3710 * ice_free_res_cntr - free resource counter
3711 * @hw: pointer to the hardware structure
3712 * @type: type of resource
3713 * @alloc_shared: if set it is shared else dedicated
3714 * @num_items: number of entries to be freed for FD resource type
3715 * @counter_id: counter ID resource which needs to be freed
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;

	/* NOTE(review): like the alloc path, the buffer holds one element
	 * while num_elems is set to num_items — confirm callers pass 1.
	 */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);

	buf->num_elems = cpu_to_le16(num_items);
	/* Encode resource type and shared/dedicated flag in one field */
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	/* Tell FW which counter to release */
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
3745 /* This is mapping table entry that maps every word within a given protocol
3746 * structure to the real byte offset as per the specification of that
 * for example dst address is 3 words in an Ethernet header and the
 * corresponding bytes are 0, 2, 4 in the actual packet header and
 * src address is at bytes 6, 8, 10
3750 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
3751 * matching entry describing its field. This needs to be updated if new
3752 * structure is added to that union.
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	/* Each entry lists, per protocol, the byte offset of every 16-bit
	 * word that can be extracted from that protocol header.
	 */
	{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL, { 0 } },
	/* VLAN: TCI word (offset 2) is listed before the offset-0 word */
	{ ICE_VLAN_OFOS, { 2, 0 } },
	{ ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
			   26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
			 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL, { 0, 2 } },
	{ ICE_UDP_OF, { 0, 2 } },
	{ ICE_UDP_ILOS, { 0, 2 } },
	/* VXLAN/GENEVE words start at offset 8 — presumably relative to the
	 * encapsulating UDP header; confirm against the dummy packets.
	 */
	{ ICE_VXLAN, { 8, 10, 12, 14 } },
	{ ICE_GENEVE, { 8, 10, 12, 14 } },
	{ ICE_NVGRE, { 0, 2, 4, 6 } },
/* Translation from driver-side protocol type to the HW protocol ID used in
 * field vectors.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL, ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL, ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL, ICE_IPV6_IL_HW },
	{ ICE_TCP_IL, ICE_TCP_IL_HW },
	{ ICE_UDP_OF, ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
	/* VXLAN and GENEVE share the outer-UDP HW protocol ID */
	{ ICE_VXLAN, ICE_UDP_OF_HW },
	{ ICE_GENEVE, ICE_UDP_OF_HW },
	{ ICE_NVGRE, ICE_GRE_OF_HW },
3791 * ice_find_recp - find a recipe
3792 * @hw: pointer to the hardware structure
3793 * @lkup_exts: extension sequence to match
3794 * @tun_type: type of recipe tunnel
3796 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
	bool refresh_required = true;
	struct ice_sw_recipe *recp;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				/* search the recipe's words for one matching
				 * the "pe"th lookup word (offset + prot_id)
				 */
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
						/* Found the "pe"th word in the
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
		/* If for "i"th recipe the found was never set to false
		 * then it means we found our match
		 * Also tun type of recipe needs to be checked
		 */
		if (found && recp[i].tun_type == tun_type)
			return i; /* Return the recipe ID */

	/* No existing recipe matched the requested extraction sequence */
	return ICE_MAX_NUM_RECIPES;
3871 * ice_prot_type_to_id - get protocol ID from protocol type
3872 * @type: protocol type
3873 * @id: pointer to variable that will receive the ID
3875 * Returns true if found, false otherwise
/* Linear scan of the driver-to-HW protocol translation table */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			/* Output parameter receives the HW protocol ID */
			*id = ice_prot_id_tbl[i].protocol_id;
3890 * ice_fill_valid_words - count valid words
3891 * @rule: advanced rule with lookup information
3892 * @lkup_exts: byte offset extractions of the words that are valid
3894 * calculate valid words in a lookup rule using mask value
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
	u8 j, word, prot_id, ret_val;

	/* Unknown protocol type: nothing to extract */
	if (!ice_prot_type_to_id(rule->type, &prot_id))

	/* Append after the words already recorded in lkup_exts */
	word = lkup_exts->n_val_words;

	/* A word is "valid" when its 16-bit mask in m_u is non-zero */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
			/* Record offset, HW protocol ID, and CPU-order mask */
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);

	/* Number of words added by this rule */
	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;
3929 * ice_create_first_fit_recp_def - Create a recipe grouping
3930 * @hw: pointer to the hardware structure
3931 * @lkup_exts: an array of protocol header extractions
3932 * @rg_list: pointer to a list that stores new recipe groups
3933 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
3935 * Using first fit algorithm, take all the words that are still not done
3936 * and start grouping them in 4-word groups. Each group makes up one
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
	struct ice_pref_recipe_group *grp = NULL;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* Start a new group once the current one holds
			 * ICE_NUM_WORDS_RECIPE pairs (or none exists yet).
			 */
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;

			/* Copy this word's protocol/offset pair and mask
			 * into the current group.
			 */
			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
3981 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
3982 * @hw: pointer to the hardware structure
3983 * @fv_list: field vector with the extraction sequence information
3984 * @rg_list: recipe groupings with protocol-offset pairs
3986 * Helper function to fill in the field vector indices for protocol-offset
3987 * pairs. These indexes are then ultimately programmed into a recipe.
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
		       struct list_head *rg_list)
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	/* Nothing to resolve against if no field vector is available */
	if (list_empty(fv_list))

	/* Only the first field vector's extraction words are consulted */
	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
	fv_ext = fv->fv_ptr->ew;

	list_for_each_entry(rg, rg_list, l_entry) {
		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;

			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];

			/* Locate the FV word with the same protocol/offset */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					/* Store index of field vector */
					rg->fv_mask[i] = mask;

			/* Protocol/offset could not be found, caller gave an
4039 * ice_find_free_recp_res_idx - find free result indexes for recipe
4040 * @hw: pointer to hardware structure
4041 * @profiles: bitmap of profiles that will be associated with the new recipe
4042 * @free_idx: pointer to variable to receive the free index bitmap
4044 * The algorithm used here is:
4045 * 1. When creating a new recipe, create a set P which contains all
4046 * Profiles that will be associated with our new recipe
4048 * 2. For each Profile p in set P:
4049 * a. Add all recipes associated with Profile p into set R
4050 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4051 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4052 * i. Or just assume they all have the same possible indexes:
4054 * i.e., PossibleIndexes = 0x0000F00000000000
4056 * 3. For each Recipe r in set R:
4057 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4058 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4060 * FreeIndexes will contain the bits indicating the indexes free for use,
4061 * then the code needs to update the recipe[r].used_result_idx_bits to
4062 * indicate which indexes were selected for use by this recipe.
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	/* Start from "all indexes possible"; profiles narrow this below */
	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,

	/* free = possible XOR used; equivalent to possible AND NOT used
	 * when used is a subset of possible — presumably guaranteed by the
	 * per-profile bookkeeping above (TODO confirm).
	 */
	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4106 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4107 * @hw: pointer to hardware structure
4108 * @rm: recipe management list entry
4109 * @profiles: bitmap of profiles that will be associated.
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	/* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* A chained recipe needs one result index per group */
		if (rm->n_grp_count > free_res_idx)

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)

	/* tmp holds the recipes read back from FW; buf is the set we will
	 * program, one element per recipe group.
	 */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);

	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
	if (status || recipe_count == 0)

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		status = ice_alloc_recipe(hw, &entry->rid);

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;

		/* Program this group's resolved FV indexes and masks into
		 * lookup slots 1..n (slot 0 is the switch ID above).
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				cpu_to_le16(entry->fv_mask[i]);

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");

			/* Non-root recipes in a chain publish their match
			 * result at chain_idx for the chaining recipe.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

	/* Single-group recipe: the one recipe is itself the root */
	if (rm->n_grp_count == 1) {
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));

		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring to a newly created recipe needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
		struct ice_recp_grp_entry *last_chain_entry;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
		if (!last_chain_entry) {
		last_chain_entry->rid = rid;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;

		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The chaining recipe matches the result word each chained
		 * recipe published at its chain_idx.
		 */
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		rm->root_rid = (u8)rid;

	/* Program the recipes into FW under the change lock */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);

	/* Every recipe that just got created add it to the recipe
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {

		/* Mirror the programmed recipe into SW bookkeeping */
		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;
			recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;

	devm_kfree(ice_hw_to_dev(hw), buf);
4398 * ice_create_recipe_group - creates recipe group
4399 * @hw: pointer to hardware structure
4400 * @rm: recipe management list entry
4401 * @lkup_exts: lookup elements
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
		/* Record group count and mirror the lookup words/masks into
		 * the recipe management entry.
		 */
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		memcpy(&rm->ext_words, lkup_exts->fv_words,
		       sizeof(rm->ext_words));
		memcpy(rm->word_masks, lkup_exts->field_mask,
		       sizeof(rm->word_masks));
4430 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
4431 * @hw: pointer to hardware structure
4432 * @lkups: lookup elements or match criteria for the advanced recipe, one
4433 * structure per protocol header
4434 * @lkups_cnt: number of protocols
4435 * @bm: bitmap of field vectors to consider
4436 * @fv_list: pointer to a list that holds the returned field vectors
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   unsigned long *bm, struct list_head *fv_list)
	/* One HW protocol ID per lookup element */
	prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);

	/* Translate each lookup's protocol type; bail on unknown types */
	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
4465 * ice_tun_type_match_word - determine if tun type needs a match mask
4466 * @tun_type: tunnel type
4467 * @mask: mask to be used for the tunnel
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
	/* Only tunnel types need the metadata flag match; the mask is
	 * written through *mask for the caller.
	 */
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_NVGRE:
		*mask = ICE_TUN_FLAG_MASK;
4485 * ice_add_special_words - Add words that are not protocols, such as metadata
4486 * @rinfo: other information regarding the rule e.g. priority and action info
4487 * @lkup_exts: lookup word structure
ice_add_special_words(struct ice_adv_rule_info *rinfo,
		      struct ice_prot_lkup_ext *lkup_exts)
	/* If this is a tunneled packet, then add recipe index to match the
	 * tunnel bit in the packet metadata flags.
	 */
	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
		/* Only append when a chain-word slot is still available */
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			u8 word = lkup_exts->n_val_words++;

			/* Metadata word: tunnel-flag MDID offset and mask */
			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] = mask;
4513 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
4514 * @hw: pointer to hardware structure
4515 * @rinfo: other information regarding the rule e.g. priority and action info
4516 * @bm: pointer to memory for returning the bitmap of field vectors
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
	enum ice_prof_type prof_type;

	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);

	/* Map the rule's tunnel type onto a profile class, then fetch the
	 * bitmap of field vectors belonging to that class.
	 */
	switch (rinfo->tun_type) {
		prof_type = ICE_PROF_NON_TUN;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
		prof_type = ICE_PROF_TUN_UDP;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
	case ICE_SW_TUN_AND_NON_TUN:
		prof_type = ICE_PROF_ALL;

	ice_get_sw_fv_bitmap(hw, prof_type, bm);
4550 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
4551 * @hw: pointer to hardware structure
4552 * @lkups: lookup elements or match criteria for the advanced recipe, one
4553 * structure per protocol header
4554 * @lkups_cnt: number of protocols
4555 * @rinfo: other information regarding the rule e.g. priority and action info
4556 * @rid: return the recipe ID of the recipe created
4559 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
4560 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
4562 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
4563 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
4564 struct ice_prot_lkup_ext *lkup_exts;
4565 struct ice_recp_grp_entry *r_entry;
4566 struct ice_sw_fv_list_entry *fvit;
4567 struct ice_recp_grp_entry *r_tmp;
4568 struct ice_sw_fv_list_entry *tmp;
4569 struct ice_sw_recipe *rm;
/* lkup_exts collects the protocol/offset "words" extracted from the caller's
 * lookup list; it is heap-allocated because it is copied into the recipe
 * bookkeeping on success (see the memcpy near the end).
 */
4576 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
4580 /* Determine the number of words to be matched and if it exceeds a
4581 * recipe's restrictions
4583 for (i = 0; i < lkups_cnt; i++) {
/* Reject any protocol type outside the known enum range. */
4586 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
4588 goto err_free_lkup_exts;
4591 count = ice_fill_valid_words(&lkups[i], lkup_exts);
4594 goto err_free_lkup_exts;
4598 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
4601 goto err_free_lkup_exts;
4604 /* Get field vectors that contain fields extracted from all the protocol
4605 * headers being programmed.
4607 INIT_LIST_HEAD(&rm->fv_list);
4608 INIT_LIST_HEAD(&rm->rg_list);
4610 /* Get bitmap of field vectors (profiles) that are compatible with the
4611 * rule request; only these will be searched in the subsequent call to
4614 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
4616 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
4620 /* Create any special protocol/offset pairs, such as looking at tunnel
4621 * bits by extracting metadata
4623 status = ice_add_special_words(rinfo, lkup_exts);
4625 goto err_free_lkup_exts;
4627 /* Group match words into recipes using preferred recipe grouping
4630 status = ice_create_recipe_group(hw, rm, lkup_exts);
4634 /* set the recipe priority if specified */
4635 rm->priority = (u8)rinfo->priority;
4637 /* Find offsets from the field vector. Pick the first one for all the
4640 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
4644 /* get bitmap of all profiles the recipe will be associated with */
4645 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
4646 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4647 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
4648 set_bit((u16)fvit->profile_id, profiles);
4651 /* Look for a recipe which matches our requested fv / mask list */
4652 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
/* A valid rid (< ICE_MAX_NUM_RECIPES) means an equivalent recipe already
 * exists and can be reused without programming a new one.
 */
4653 if (*rid < ICE_MAX_NUM_RECIPES)
4654 /* Success if found a recipe that match the existing criteria */
4657 rm->tun_type = rinfo->tun_type;
4658 /* Recipe we need does not exist, add a recipe */
4659 status = ice_add_sw_recipe(hw, rm, profiles);
4663 /* Associate all the recipes created with all the profiles in the
4664 * common field vector.
4666 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4667 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write of the profile's recipe association bitmap via
 * admin queue, serialized by the change lock.
 */
4670 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
4671 (u8 *)r_bitmap, NULL);
4675 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
4676 ICE_MAX_NUM_RECIPES);
4677 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4681 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
4684 ice_release_change_lock(hw);
4689 /* Update profile to recipe bitmap array */
4690 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
4691 ICE_MAX_NUM_RECIPES);
4693 /* Update recipe to profile bitmap array */
4694 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
4695 set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
4698 *rid = rm->root_rid;
/* Cache the lookup extraction info with the recipe so later rule
 * add/remove calls can re-find this recipe via ice_find_recp().
 */
4699 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
4700 sizeof(*lkup_exts));
/* Cleanup: free temporary recipe-group and field-vector list entries. */
4702 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
4703 list_del(&r_entry->l_entry);
4704 devm_kfree(ice_hw_to_dev(hw), r_entry);
4707 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
4708 list_del(&fvit->list_entry);
4709 devm_kfree(ice_hw_to_dev(hw), fvit);
4713 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
4724 * ice_find_dummy_packet - find dummy packet
4726 * @lkups: lookup elements or match criteria for the advanced recipe, one
4727 * structure per protocol header
4728 * @lkups_cnt: number of protocols
4729 * @tun_type: tunnel type
4730 * @pkt: dummy packet to fill according to filter match criteria
4731 * @pkt_len: packet length of dummy packet
4732 * @offsets: pointer to receive the pointer to the offsets for the packet
4735 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4736 enum ice_sw_tunnel_type tun_type,
4737 const u8 **pkt, u16 *pkt_len,
4738 const struct ice_dummy_pkt_offsets **offsets)
4740 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Classify the lookups (L4 protocol, IPv6, VLAN) so the matching
 * static dummy-packet template and its offset table can be selected.
 */
4743 for (i = 0; i < lkups_cnt; i++) {
4744 if (lkups[i].type == ICE_UDP_ILOS)
4746 else if (lkups[i].type == ICE_TCP_IL)
4748 else if (lkups[i].type == ICE_IPV6_OFOS)
4750 else if (lkups[i].type == ICE_VLAN_OFOS)
/* An outer ethertype lookup fully masked to 0x86DD also implies IPv6. */
4752 else if (lkups[i].type == ICE_ETYPE_OL &&
4753 lkups[i].h_u.ethertype.ethtype_id ==
4754 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
4755 lkups[i].m_u.ethertype.ethtype_id ==
4756 cpu_to_be16(0xFFFF))
/* Tunnel templates take precedence over the plain L3/L4 templates. */
4760 if (tun_type == ICE_SW_TUN_NVGRE) {
4762 *pkt = dummy_gre_tcp_packet;
4763 *pkt_len = sizeof(dummy_gre_tcp_packet);
4764 *offsets = dummy_gre_tcp_packet_offsets;
4768 *pkt = dummy_gre_udp_packet;
4769 *pkt_len = sizeof(dummy_gre_udp_packet);
4770 *offsets = dummy_gre_udp_packet_offsets;
4774 if (tun_type == ICE_SW_TUN_VXLAN ||
4775 tun_type == ICE_SW_TUN_GENEVE) {
4777 *pkt = dummy_udp_tun_tcp_packet;
4778 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
4779 *offsets = dummy_udp_tun_tcp_packet_offsets;
4783 *pkt = dummy_udp_tun_udp_packet;
4784 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
4785 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel cases: pick a VLAN or plain variant per the flags above. */
4791 *pkt = dummy_vlan_udp_packet;
4792 *pkt_len = sizeof(dummy_vlan_udp_packet);
4793 *offsets = dummy_vlan_udp_packet_offsets;
4796 *pkt = dummy_udp_packet;
4797 *pkt_len = sizeof(dummy_udp_packet);
4798 *offsets = dummy_udp_packet_offsets;
4800 } else if (udp && ipv6) {
4802 *pkt = dummy_vlan_udp_ipv6_packet;
4803 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
4804 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
4807 *pkt = dummy_udp_ipv6_packet;
4808 *pkt_len = sizeof(dummy_udp_ipv6_packet);
4809 *offsets = dummy_udp_ipv6_packet_offsets;
4811 } else if ((tcp && ipv6) || ipv6) {
4813 *pkt = dummy_vlan_tcp_ipv6_packet;
4814 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
4815 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
4818 *pkt = dummy_tcp_ipv6_packet;
4819 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
4820 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Final fallback: TCP/IPv4 template (with or without VLAN). */
4825 *pkt = dummy_vlan_tcp_packet;
4826 *pkt_len = sizeof(dummy_vlan_tcp_packet);
4827 *offsets = dummy_vlan_tcp_packet_offsets;
4829 *pkt = dummy_tcp_packet;
4830 *pkt_len = sizeof(dummy_tcp_packet);
4831 *offsets = dummy_tcp_packet_offsets;
4836 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
4838 * @lkups: lookup elements or match criteria for the advanced recipe, one
4839 * structure per protocol header
4840 * @lkups_cnt: number of protocols
4841 * @s_rule: stores rule information from the match criteria
4842 * @dummy_pkt: dummy packet to fill according to filter match criteria
4843 * @pkt_len: packet length of dummy packet
4844 * @offsets: offset info for the dummy packet
4847 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4848 struct ice_aqc_sw_rules_elem *s_rule,
4849 const u8 *dummy_pkt, u16 pkt_len,
4850 const struct ice_dummy_pkt_offsets *offsets)
4855 /* Start with a packet with a pre-defined/dummy content. Then, fill
4856 * in the header values to be looked up or matched.
4858 pkt = s_rule->pdata.lkup_tx_rx.hdr;
4860 memcpy(pkt, dummy_pkt, pkt_len);
4862 for (i = 0; i < lkups_cnt; i++) {
4863 enum ice_protocol_type type;
4864 u16 offset = 0, len = 0, j;
4867 /* find the start of this layer; it should be found since this
4868 * was already checked when search for the dummy packet
4870 type = lkups[i].type;
4871 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
4872 if (type == offsets[j].type) {
4873 offset = offsets[j].offset;
4878 /* this should never happen in a correct calling sequence */
/* Map the protocol type to the byte length of its header struct so
 * only that header's bytes in the dummy packet are overwritten.
 */
4882 switch (lkups[i].type) {
4885 len = sizeof(struct ice_ether_hdr);
4888 len = sizeof(struct ice_ethtype_hdr);
4891 len = sizeof(struct ice_vlan_hdr);
4895 len = sizeof(struct ice_ipv4_hdr);
4899 len = sizeof(struct ice_ipv6_hdr);
4904 len = sizeof(struct ice_l4_hdr);
4907 len = sizeof(struct ice_sctp_hdr);
4910 len = sizeof(struct ice_nvgre_hdr);
4914 len = sizeof(struct ice_udp_tnl_hdr);
4920 /* the length should be a word multiple */
4921 if (len % ICE_BYTES_PER_WORD)
4924 /* We have the offset to the header start, the length, the
4925 * caller's header values and mask. Use this information to
4926 * copy the data into the dummy packet appropriately based on
4927 * the mask. Note that we need to only write the bits as
4928 * indicated by the mask to make sure we don't improperly write
4929 * over any significant packet data.
/* Word-wise merge: for each u16 with a non-zero mask, keep the dummy
 * packet's unmasked bits and splice in the caller's masked bits.
 */
4931 for (j = 0; j < len / sizeof(u16); j++)
4932 if (((u16 *)&lkups[i].m_u)[j])
4933 ((u16 *)(pkt + offset))[j] =
4934 (((u16 *)(pkt + offset))[j] &
4935 ~((u16 *)&lkups[i].m_u)[j]) |
4936 (((u16 *)&lkups[i].h_u)[j] &
4937 ((u16 *)&lkups[i].m_u)[j]);
4940 s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
4946 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
4947 * @hw: pointer to the hardware structure
4948 * @tun_type: tunnel type
4949 * @pkt: dummy packet to fill in
4950 * @offsets: offset info for the dummy packet
4953 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
4954 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Look up the currently open tunnel port for the requested tunnel type;
 * other tunnel types need no port patched into the dummy packet.
 */
4959 case ICE_SW_TUN_VXLAN:
4960 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
4963 case ICE_SW_TUN_GENEVE:
4964 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
4968 /* Nothing needs to be done for this tunnel type */
4972 /* Find the outer UDP protocol header and insert the port number */
4973 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
4974 if (offsets[i].type == ICE_UDP_OF) {
4975 struct ice_l4_hdr *hdr;
4978 offset = offsets[i].offset;
4979 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Tunnel demux is by UDP destination port (big-endian on the wire). */
4980 hdr->dst_port = cpu_to_be16(open_port);
4990 * ice_find_adv_rule_entry - Search a rule entry
4991 * @hw: pointer to the hardware structure
4992 * @lkups: lookup elements or match criteria for the advanced recipe, one
4993 * structure per protocol header
4994 * @lkups_cnt: number of protocols
4995 * @recp_id: recipe ID for which we are finding the rule
4996 * @rinfo: other information regarding the rule e.g. priority and action info
4998 * Helper function to search for a given advance rule entry
4999 * Returns pointer to entry storing the rule if found
5001 static struct ice_adv_fltr_mgmt_list_entry *
5002 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5003 u16 lkups_cnt, u16 recp_id,
5004 struct ice_adv_rule_info *rinfo)
5006 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5007 struct ice_switch_info *sw = hw->switch_info;
/* Walk this recipe's bookkeeping list; a match requires the same number
 * of lookups, byte-identical lookup entries, and matching flag/tunnel
 * attributes in the rule info.
 */
5010 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5012 bool lkups_matched = true;
5014 if (lkups_cnt != list_itr->lkups_cnt)
5016 for (i = 0; i < list_itr->lkups_cnt; i++)
5017 if (memcmp(&list_itr->lkups[i], &lkups[i],
5019 lkups_matched = false;
5022 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5023 rinfo->tun_type == list_itr->rule_info.tun_type &&
5031 * ice_adv_add_update_vsi_list
5032 * @hw: pointer to the hardware structure
5033 * @m_entry: pointer to current adv filter management list entry
5034 * @cur_fltr: filter information from the book keeping entry
5035 * @new_fltr: filter information with the new VSI to be added
5037 * Call AQ command to add or update previously created VSI list with new VSI.
5039 * Helper function to do book keeping associated with adding filter information
5040 * The algorithm to do the booking keeping is described below :
5041 * When a VSI needs to subscribe to a given advanced filter
5042 * if only one VSI has been added till now
5043 * Allocate a new VSI list and add two VSIs
5044 * to this list using switch rule command
5045 * Update the previously created switch rule with the
5046 * newly created VSI list ID
5047 * if a VSI list was previously created
5048 * Add the new VSI to the previously created VSI list set
5049 * using the update switch rule command
5052 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5053 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5054 struct ice_adv_rule_info *cur_fltr,
5055 struct ice_adv_rule_info *new_fltr)
5057 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be aggregated into a VSI list;
 * neither can mixing a queue-forward request with a VSI-forward rule.
 */
5060 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5061 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5062 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5065 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5066 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5067 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5068 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5071 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5072 /* Only one entry existed in the mapping and it was not already
5073 * a part of a VSI list. So, create a VSI list with the old and
5076 struct ice_fltr_info tmp_fltr;
5077 u16 vsi_handle_arr[2];
5079 /* A rule already exists with the new VSI being added */
5080 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5081 new_fltr->sw_act.fwd_id.hw_vsi_id)
5084 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5085 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5086 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5092 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5093 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5094 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5095 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5096 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5097 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5099 /* Update the previous switch rule of "forward to VSI" to
5102 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new forwarding target in the bookkeeping entry. */
5106 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5107 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5108 m_entry->vsi_list_info =
5109 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
/* Else branch: a VSI list already exists; just add the new VSI to it. */
5112 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5114 if (!m_entry->vsi_list_info)
5117 /* A rule already exists with the new VSI being added */
5118 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5121 /* Update the previously created VSI list set with
5122 * the new VSI ID passed in
5124 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5126 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5128 ice_aqc_opc_update_sw_rules,
5130 /* update VSI list mapping info with new VSI ID */
5132 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5135 m_entry->vsi_count++;
5140 * ice_add_adv_rule - helper function to create an advanced switch rule
5141 * @hw: pointer to the hardware structure
5142 * @lkups: information on the words that needs to be looked up. All words
5143 * together makes one recipe
5144 * @lkups_cnt: num of entries in the lkups array
5145 * @rinfo: other information related to the rule that needs to be programmed
5146 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5147 * ignored is case of error.
5149 * This function can program only 1 rule at a time. The lkups is used to
5150 * describe the all the words that forms the "lookup" portion of the recipe.
5151 * These words can span multiple protocols. Callers to this function need to
5152 * pass in a list of protocol headers with lookup information along and mask
5153 * that determines which words are valid from the given protocol header.
5154 * rinfo describes other information related to this rule such as forwarding
5155 * IDs, priority of this rule, etc.
5158 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5159 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5160 struct ice_rule_query_data *added_entry)
5162 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5163 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5164 const struct ice_dummy_pkt_offsets *pkt_offsets;
5165 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5166 struct list_head *rule_head;
5167 struct ice_switch_info *sw;
5168 const u8 *pkt = NULL;
5174 /* Initialize profile to result index bitmap */
/* One-time lazy init of the profile/result bitmap on first rule add. */
5175 if (!hw->switch_info->prof_res_bm_init) {
5176 hw->switch_info->prof_res_bm_init = 1;
5177 ice_init_prof_result_bm(hw);
5183 /* get # of words we need to match */
5185 for (i = 0; i < lkups_cnt; i++) {
5188 ptr = (u16 *)&lkups[i].m_u;
5189 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* An empty match (no mask words) or one exceeding the chain limit
 * cannot be programmed as a recipe.
 */
5194 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
5197 /* make sure that we can locate a dummy packet */
5198 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5202 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for advanced rules. */
5205 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5206 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5207 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5208 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5211 vsi_handle = rinfo->sw_act.vsi_handle;
5212 if (!ice_is_vsi_valid(hw, vsi_handle))
5215 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5216 rinfo->sw_act.fwd_id.hw_vsi_id =
5217 ice_get_hw_vsi_num(hw, vsi_handle);
5218 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5219 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
5221 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5224 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5226 /* we have to add VSI to VSI_LIST and increment vsi_count.
5227 * Also Update VSI list so that we can change forwarding rule
5228 * if the rule already exists, we will check if it exists with
5229 * same vsi_id, if not then add it to the VSI list if it already
5230 * exists if not then create a VSI list and add the existing VSI
5231 * ID and the new VSI ID to the list
5232 * We will add that VSI to the list
5234 status = ice_adv_add_update_vsi_list(hw, m_entry,
5235 &m_entry->rule_info,
5238 added_entry->rid = rid;
5239 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5240 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule: build and program a brand-new switch rule. The
 * buffer holds the fixed rule element plus the dummy packet header.
 */
5244 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5245 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
5248 if (!rinfo->flags_info.act_valid) {
5249 act |= ICE_SINGLE_ACT_LAN_ENABLE;
5250 act |= ICE_SINGLE_ACT_LB_ENABLE;
5252 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
5253 ICE_SINGLE_ACT_LB_ENABLE);
/* Encode the forwarding action into the single-action word. */
5256 switch (rinfo->sw_act.fltr_act) {
5257 case ICE_FWD_TO_VSI:
5258 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5259 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5260 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5263 act |= ICE_SINGLE_ACT_TO_Q;
5264 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5265 ICE_SINGLE_ACT_Q_INDEX_M;
5267 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size. */
5268 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5269 (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
5270 act |= ICE_SINGLE_ACT_TO_Q;
5271 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5272 ICE_SINGLE_ACT_Q_INDEX_M;
5273 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5274 ICE_SINGLE_ACT_Q_REGION_M;
5276 case ICE_DROP_PACKET:
5277 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5278 ICE_SINGLE_ACT_VALID_BIT;
5282 goto err_ice_add_adv_rule;
5285 /* set the rule LOOKUP type based on caller specified 'Rx'
5286 * instead of hardcoding it to be either LOOKUP_TX/RX
5288 * for 'Rx' set the source to be the port number
5289 * for 'Tx' set the source to be the source HW VSI number (determined
5293 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
5294 s_rule->pdata.lkup_tx_rx.src =
5295 cpu_to_le16(hw->port_info->lport);
5297 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
5298 s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
5301 s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
5302 s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
5304 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
5305 pkt_len, pkt_offsets);
5307 goto err_ice_add_adv_rule;
/* For real tunnels (not plain or "tunnel and non-tunnel") patch the
 * currently-open tunnel UDP port into the dummy packet header.
 */
5309 if (rinfo->tun_type != ICE_NON_TUN &&
5310 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
5311 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
5312 s_rule->pdata.lkup_tx_rx.hdr,
5315 goto err_ice_add_adv_rule;
5318 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5319 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5322 goto err_ice_add_adv_rule;
/* Rule programmed in HW; now create the bookkeeping entry. */
5323 adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
5324 sizeof(struct ice_adv_fltr_mgmt_list_entry),
5328 goto err_ice_add_adv_rule;
5331 adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
5332 lkups_cnt * sizeof(*lkups), GFP_KERNEL);
5333 if (!adv_fltr->lkups) {
5335 goto err_ice_add_adv_rule;
5338 adv_fltr->lkups_cnt = lkups_cnt;
5339 adv_fltr->rule_info = *rinfo;
5340 adv_fltr->rule_info.fltr_rule_id =
5341 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
5342 sw = hw->switch_info;
5343 sw->recp_list[rid].adv_rule = true;
5344 rule_head = &sw->recp_list[rid].filt_rules;
5346 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5347 adv_fltr->vsi_count = 1;
5349 /* Add rule entry to book keeping list */
5350 list_add(&adv_fltr->list_entry, rule_head);
5352 added_entry->rid = rid;
5353 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5354 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5356 err_ice_add_adv_rule:
/* Error path: undo partial bookkeeping allocations. */
5357 if (status && adv_fltr) {
5358 devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
5359 devm_kfree(ice_hw_to_dev(hw), adv_fltr);
5368 * ice_replay_vsi_fltr - Replay filters for requested VSI
5369 * @hw: pointer to the hardware structure
5370 * @vsi_handle: driver VSI handle
5371 * @recp_id: Recipe ID for which rules need to be replayed
5372 * @list_head: list for which filters need to be replayed
5374 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
5375 * It is required to pass valid VSI handle.
5378 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
5379 struct list_head *list_head)
5381 struct ice_fltr_mgmt_list_entry *itr;
5385 if (list_empty(list_head))
5387 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5389 list_for_each_entry(itr, list_head, list_entry) {
5390 struct ice_fltr_list_entry f_entry;
5392 f_entry.fltr_info = itr->fltr_info;
/* Simple case: single-VSI, non-VLAN rule owned by this VSI can be
 * re-added directly.
 */
5393 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
5394 itr->fltr_info.vsi_handle == vsi_handle) {
5395 /* update the src in case it is VSI num */
5396 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5397 f_entry.fltr_info.src = hw_vsi_id;
5398 status = ice_add_rule_internal(hw, recp_id, &f_entry);
/* Otherwise the rule must be tracked in a VSI list that contains
 * this VSI; skip entries that don't reference it.
 */
5403 if (!itr->vsi_list_info ||
5404 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
5406 /* Clearing it so that the logic can add it back */
5407 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
5408 f_entry.fltr_info.vsi_handle = vsi_handle;
5409 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
5410 /* update the src in case it is VSI num */
5411 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5412 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules have dedicated add handling. */
5413 if (recp_id == ICE_SW_LKUP_VLAN)
5414 status = ice_add_vlan_internal(hw, &f_entry);
5416 status = ice_add_rule_internal(hw, recp_id, &f_entry);
5425 * ice_adv_rem_update_vsi_list
5426 * @hw: pointer to the hardware structure
5427 * @vsi_handle: VSI handle of the VSI to remove
5428 * @fm_list: filter management entry for which the VSI list management needs to
5432 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5433 struct ice_adv_fltr_mgmt_list_entry *fm_list)
5435 struct ice_vsi_list_map_info *vsi_list_info;
5436 enum ice_sw_lkup_type lkup_type;
/* Only applicable to rules currently forwarding to a non-empty VSI list. */
5440 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5441 fm_list->vsi_count == 0)
5444 /* A rule with the VSI being removed does not exist */
5445 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
5448 lkup_type = ICE_SW_LKUP_LAST;
5449 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove (true = remove) the VSI from the HW VSI list first. */
5450 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5451 ice_aqc_opc_update_sw_rules,
5456 fm_list->vsi_count--;
5457 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5458 vsi_list_info = fm_list->vsi_list_info;
/* With one VSI left, collapse the list rule back to a direct
 * forward-to-VSI rule and release the list resources.
 */
5459 if (fm_list->vsi_count == 1) {
5460 struct ice_fltr_info tmp_fltr;
5463 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
5465 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5468 /* Make sure VSI list is empty before removing it below */
5469 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5471 ice_aqc_opc_update_sw_rules,
5476 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5477 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
5478 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5479 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5480 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5481 tmp_fltr.fwd_id.hw_vsi_id =
5482 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5483 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5484 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5485 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
5487 /* Update the previous switch rule of "MAC forward to VSI" to
5488 * "MAC fwd to VSI list"
5490 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5492 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5493 tmp_fltr.fwd_id.hw_vsi_id, status);
5496 fm_list->vsi_list_info->ref_cnt--;
5498 /* Remove the VSI list since it is no longer used */
5499 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5501 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
5502 vsi_list_id, status);
5506 list_del(&vsi_list_info->list_entry);
5507 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
/* Detach the freed map from the entry to avoid a dangling pointer. */
5508 fm_list->vsi_list_info = NULL;
5515 * ice_rem_adv_rule - removes existing advanced switch rule
5516 * @hw: pointer to the hardware structure
5517 * @lkups: information on the words that needs to be looked up. All words
5518 * together makes one recipe
5519 * @lkups_cnt: num of entries in the lkups array
5520 * @rinfo: Its the pointer to the rule information for the rule
5522 * This function can be used to remove 1 rule at a time. The lkups is
5523 * used to describe all the words that forms the "lookup" portion of the
5524 * rule. These words can span multiple protocols. Callers to this function
5525 * need to pass in a list of protocol headers with lookup information along
5526 * and mask that determines which words are valid from the given protocol
5527 * header. rinfo describes other information related to this rule such as
5528 * forwarding IDs, priority of this rule, etc.
5531 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5532 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
5534 struct ice_adv_fltr_mgmt_list_entry *list_elem;
5535 struct ice_prot_lkup_ext lkup_exts;
5536 bool remove_rule = false;
5537 struct mutex *rule_lock; /* Lock to protect filter rule list */
5538 u16 i, rid, vsi_handle;
/* Rebuild the lookup-extraction info so the owning recipe can be found
 * the same way it was located/created at rule-add time.
 */
5541 memset(&lkup_exts, 0, sizeof(lkup_exts));
5542 for (i = 0; i < lkups_cnt; i++) {
5545 if (lkups[i].type >= ICE_PROTOCOL_LAST)
5548 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5553 /* Create any special protocol/offset pairs, such as looking at tunnel
5554 * bits by extracting metadata
5556 status = ice_add_special_words(rinfo, &lkup_exts);
5560 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
5561 /* If did not find a recipe that match the existing criteria */
5562 if (rid == ICE_MAX_NUM_RECIPES)
5565 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5566 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5567 /* the rule is already removed */
5570 mutex_lock(rule_lock);
/* Decide whether the HW rule itself must go, or only this VSI's
 * membership in the rule's VSI list.
 */
5571 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5573 } else if (list_elem->vsi_count > 1) {
5574 remove_rule = false;
5575 vsi_handle = rinfo->sw_act.vsi_handle;
5576 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5578 vsi_handle = rinfo->sw_act.vsi_handle;
5579 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5581 mutex_unlock(rule_lock);
5584 if (list_elem->vsi_count == 0)
5587 mutex_unlock(rule_lock);
5589 struct ice_aqc_sw_rules_elem *s_rule;
/* No packet header is needed for a remove; only the rule index. */
5592 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5593 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
5596 s_rule->pdata.lkup_tx_rx.act = 0;
5597 s_rule->pdata.lkup_tx_rx.index =
5598 cpu_to_le16(list_elem->rule_info.fltr_rule_id);
5599 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5600 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5602 ice_aqc_opc_remove_sw_rules, NULL);
/* -ENOENT means FW no longer has the rule; still drop bookkeeping. */
5603 if (!status || status == -ENOENT) {
5604 struct ice_switch_info *sw = hw->switch_info;
5606 mutex_lock(rule_lock);
5607 list_del(&list_elem->list_entry);
5608 devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
5609 devm_kfree(ice_hw_to_dev(hw), list_elem);
5610 mutex_unlock(rule_lock);
5611 if (list_empty(&sw->recp_list[rid].filt_rules))
5612 sw->recp_list[rid].adv_rule = false;
5620 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5621 * @hw: pointer to the hardware structure
5622 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5624 * This function is used to remove 1 rule at a time. The removal is based on
5625 * the remove_entry parameter. This function will remove rule for a given
5626 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5629 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5630 struct ice_rule_query_data *remove_entry)
5632 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5633 struct list_head *list_head;
5634 struct ice_adv_rule_info rinfo;
5635 struct ice_switch_info *sw;
5637 sw = hw->switch_info;
/* Nothing to remove if the recipe was never created. */
5638 if (!sw->recp_list[remove_entry->rid].recp_created)
5640 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5641 list_for_each_entry(list_itr, list_head, list_entry) {
5642 if (list_itr->rule_info.fltr_rule_id ==
5643 remove_entry->rule_id) {
/* Copy the stored rule info but target the caller's VSI, then
 * delegate to the lookup-based removal path.
 */
5644 rinfo = list_itr->rule_info;
5645 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5646 return ice_rem_adv_rule(hw, list_itr->lkups,
5647 list_itr->lkups_cnt, &rinfo);
5650 /* either list is empty or unable to find rule */
5655 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
5657 * @hw: pointer to the hardware structure
5658 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
5660 * This function is used to remove all the rules for a given VSI and as soon
5661 * as removing a rule fails, it will return immediately with the error code,
5662 * else it will return success.
5664 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
5666 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
5667 struct ice_vsi_list_map_info *map_info;
5668 struct ice_adv_rule_info rinfo;
5669 struct list_head *list_head;
5670 struct ice_switch_info *sw;
5674 sw = hw->switch_info;
/* Scan every created recipe that carries advanced rules. */
5675 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
5676 if (!sw->recp_list[rid].recp_created)
5678 if (!sw->recp_list[rid].adv_rule)
5681 list_head = &sw->recp_list[rid].filt_rules;
/* _safe iteration: ice_rem_adv_rule() below may delete entries. */
5682 list_for_each_entry_safe(list_itr, tmp_entry, list_head,
5684 rinfo = list_itr->rule_info;
/* Match either via the rule's VSI list membership or its direct
 * VSI handle, depending on the forwarding action.
 */
5686 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
5687 map_info = list_itr->vsi_list_info;
5691 if (!test_bit(vsi_handle, map_info->vsi_map))
5693 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
5697 rinfo.sw_act.vsi_handle = vsi_handle;
5698 status = ice_rem_adv_rule(hw, list_itr->lkups,
5699 list_itr->lkups_cnt, &rinfo);
5708 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
5709 * @hw: pointer to the hardware structure
5710 * @vsi_handle: driver VSI handle
5711 * @list_head: list for which filters need to be replayed
5713 * Replay the advanced rule for the given VSI.
5716 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
5717 struct list_head *list_head)
5719 struct ice_rule_query_data added_entry = { 0 };
5720 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
5723 if (list_empty(list_head))
5725 list_for_each_entry(adv_fltr, list_head, list_entry) {
5726 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
5727 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay entries that belong to the requested VSI. */
5729 if (vsi_handle != rinfo->sw_act.vsi_handle)
5731 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
5740 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
5741 * @hw: pointer to the hardware structure
5742 * @vsi_handle: driver VSI handle
5744 * Replays filters for requested VSI via vsi_handle.
5746 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
5748 struct ice_switch_info *sw = hw->switch_info;
5752 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5753 struct list_head *head;
5755 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes use the generic replay; recipes flagged adv_rule go
 * through the advanced-rule replay path.
 */
5756 if (!sw->recp_list[i].adv_rule)
5757 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
5759 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
5767 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
5768 * @hw: pointer to the HW struct
5770 * Deletes the filter replay rules.
5772 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
5774 struct ice_switch_info *sw = hw->switch_info;
5780 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5781 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
5782 struct list_head *l_head;
5784 l_head = &sw->recp_list[i].filt_replay_rules;
5785 if (!sw->recp_list[i].adv_rule)
5786 ice_rem_sw_rule_info(hw, l_head);
5788 ice_rem_adv_rule_info(hw, l_head);