1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019, Mellanox Technologies */
7 #include <linux/mlx5/driver.h>
8 #include <linux/refcount.h>
12 #include "mlx5_ifc_dr.h"
/* Longest STE chain a single rule may occupy */
#define DR_RULE_MAX_STES 17
/* Max STEs that a single action list may expand into */
#define DR_ACTION_MAX_STES 5
/* Vport number denoting the uplink (wire) port — see mlx5dr_get_vport_cap() */
#define WIRE_PORT 0xFFFF
/* First-VLAN type encodings carried in STE fields */
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
/* Byte size of the FW match_param layout (MLX5_ST_SZ_DW_* counts dwords) */
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)

/* Domain-scoped logging helpers; forward to the mlx5 core loggers of the
 * domain's mdev.
 */
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
/* Log-granularity of ICM chunk allocations.
 * NOTE(review): the remaining enumerators are not visible in this chunk of
 * the file; only the MIN alias is shown here.
 */
enum mlx5dr_icm_chunk_size {
	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */

/* ICM memory backs either STE hash tables or modify-header actions */
enum mlx5dr_icm_type {
	DR_ICM_TYPE_MODIFY_ACTION,
58 static inline enum mlx5dr_icm_chunk_size
59 mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
62 if (chunk < DR_CHUNK_SIZE_MAX)
65 return DR_CHUNK_SIZE_MAX;
	/* byte size of the control section of an STE */
	DR_STE_SIZE_CTRL = 32,
	/* byte size of the mask section of an STE */
	DR_STE_SIZE_MASK = 16,
	/* STE size minus its mask section */
	DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
	/* byte size of a single modify-header action */
	DR_MODIFY_ACTION_SIZE = 8,
/* Bitmask of which match_param sections a matcher actually uses */
enum mlx5dr_matcher_criteria {
	DR_MATCHER_CRITERIA_EMPTY = 0,
	DR_MATCHER_CRITERIA_OUTER = 1 << 0,
	DR_MATCHER_CRITERIA_MISC = 1 << 1,
	DR_MATCHER_CRITERIA_INNER = 1 << 2,
	DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
	DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
	DR_MATCHER_CRITERIA_MAX = 1 << 5,
/* SW-steering action kinds.
 * NOTE(review): some enumerators between these lines are elided in this
 * view of the file.
 */
enum mlx5dr_action_type {
	DR_ACTION_TYP_TNL_L2_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L2,
	DR_ACTION_TYP_TNL_L3_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L3,
	DR_ACTION_TYP_MODIFY_HDR,
	DR_ACTION_TYP_POP_VLAN,
	DR_ACTION_TYP_PUSH_VLAN,
/* Forward declarations: these types are defined later in this header (or in
 * sibling headers) but are referenced by pointer before their definitions.
 */
struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
struct mlx5dr_icm_buddy_mem;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;
	/* refcount: indicates the num of rules that using this ste */

	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;

	/* each rule member that uses this ste attached here */
	struct list_head rule_list;

	/* this ste is member of htbl */
	struct mlx5dr_ste_htbl *htbl;

	/* next hash table this STE chains to — see mlx5dr_ste_create_next_htbl() */
	struct mlx5dr_ste_htbl *next_htbl;

	/* this ste is part of a rule, located in ste's chain */
	u8 ste_chain_location;
struct mlx5dr_ste_htbl_ctrl {
	/* total number of valid entries belonging to this hash table. This
	 * includes the non collision and collision entries
	 */
	unsigned int num_of_valid_entries;

	/* total number of collisions entries attached to this table */
	unsigned int num_of_collisions;
	/* occupancy level above which the table should be resized —
	 * NOTE(review): the exact grow policy lives outside this header, confirm
	 */
	unsigned int increase_threshold;
/* An STE hash table.
 * NOTE(review): several members (e.g. lu_type/byte_mask/refcount) are elided
 * in this view of the file.
 */
struct mlx5dr_ste_htbl {
	struct mlx5dr_icm_chunk *chunk; /* ICM memory backing the HW entries */
	struct mlx5dr_ste *ste_arr;     /* SW state, one mlx5dr_ste per entry */

	/* per-entry heads of the collision (miss) lists */
	struct list_head *miss_list;

	enum mlx5dr_icm_chunk_size chunk_size;
	/* presumably the STE whose hit address targets this table — verify */
	struct mlx5dr_ste *pointing_ste;

	struct mlx5dr_ste_htbl_ctrl ctrl; /* occupancy/resize bookkeeping */
/* Bookkeeping for an STE write queued to be posted to HW —
 * filled by mlx5dr_send_fill_and_append_ste_send_info()
 */
struct mlx5dr_ste_send_info {
	struct mlx5dr_ste *ste;
	struct list_head send_list; /* linked on the caller's pending-send list */
	/* presumably a staging copy of the STE data to transmit — confirm */
	u8 data_cont[DR_STE_SIZE];
181 void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
182 u16 offset, u8 *data,
183 struct mlx5dr_ste_send_info *ste_info,
184 struct list_head *send_list,
187 struct mlx5dr_ste_build {
191 struct mlx5dr_domain *dmn;
192 struct mlx5dr_cmd_caps *caps;
195 u8 bit_mask[DR_STE_SIZE_MASK];
196 int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
197 struct mlx5dr_ste_build *sb,
201 struct mlx5dr_ste_htbl *
202 mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
203 enum mlx5dr_icm_chunk_size chunk_size,
204 u8 lu_type, u16 byte_mask);
206 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
208 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
212 mlx5dr_ste_htbl_free(htbl);
215 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
221 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
222 void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
223 void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
224 struct mlx5dr_ste_htbl *next_htbl);
225 void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
226 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
227 void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
228 void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
229 void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
230 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
231 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
233 void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
234 void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
235 void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
236 int size, bool encap_l3);
237 void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
238 void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
239 void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
240 void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
242 void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
243 u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
244 void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
246 void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
247 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
248 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
249 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
251 void mlx5dr_ste_free(struct mlx5dr_ste *ste,
252 struct mlx5dr_matcher *matcher,
253 struct mlx5dr_matcher_rx_tx *nic_matcher);
254 static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
255 struct mlx5dr_matcher *matcher,
256 struct mlx5dr_matcher_rx_tx *nic_matcher)
260 mlx5dr_ste_free(ste, matcher, nic_matcher);
263 /* initial as 0, increased only when ste appears in a new rule */
264 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
269 static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
271 return !ste->refcount;
274 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
275 struct mlx5dr_ste_htbl *next_htbl);
276 bool mlx5dr_ste_equal_tag(void *src, void *dst);
277 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
278 struct mlx5dr_matcher_rx_tx *nic_matcher,
279 struct mlx5dr_ste *ste,
281 enum mlx5dr_icm_chunk_size log_table_size);
283 /* STE build functions */
284 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
286 struct mlx5dr_match_param *mask,
287 struct mlx5dr_match_param *value);
288 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
289 struct mlx5dr_matcher_rx_tx *nic_matcher,
290 struct mlx5dr_match_param *value,
292 void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
293 struct mlx5dr_match_param *mask,
294 bool inner, bool rx);
295 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
296 struct mlx5dr_match_param *mask,
297 bool inner, bool rx);
298 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
299 struct mlx5dr_match_param *mask,
300 bool inner, bool rx);
301 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
302 struct mlx5dr_match_param *mask,
303 bool inner, bool rx);
304 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
305 struct mlx5dr_match_param *mask,
306 bool inner, bool rx);
307 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
308 struct mlx5dr_match_param *mask,
309 bool inner, bool rx);
310 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
311 struct mlx5dr_match_param *mask,
312 bool inner, bool rx);
313 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
314 struct mlx5dr_match_param *mask,
315 bool inner, bool rx);
316 void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
317 struct mlx5dr_match_param *mask,
318 bool inner, bool rx);
319 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
320 struct mlx5dr_match_param *mask,
321 bool inner, bool rx);
322 void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
323 struct mlx5dr_match_param *mask,
324 bool inner, bool rx);
325 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
326 struct mlx5dr_match_param *mask,
327 bool inner, bool rx);
328 void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
329 struct mlx5dr_match_param *mask,
330 bool inner, bool rx);
331 int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
332 struct mlx5dr_match_param *mask,
333 struct mlx5dr_cmd_caps *caps,
334 bool inner, bool rx);
335 void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
336 struct mlx5dr_match_param *mask,
337 bool inner, bool rx);
338 void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
339 struct mlx5dr_match_param *mask,
340 bool inner, bool rx);
341 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
342 struct mlx5dr_match_param *mask,
343 bool inner, bool rx);
344 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
345 struct mlx5dr_match_param *mask,
346 bool inner, bool rx);
347 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
348 struct mlx5dr_match_param *mask,
349 bool inner, bool rx);
350 void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
351 struct mlx5dr_match_param *mask,
352 struct mlx5dr_domain *dmn,
353 bool inner, bool rx);
354 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
357 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
358 struct mlx5dr_matcher_rx_tx *nic_matcher,
359 struct mlx5dr_action *actions[],
362 u32 *new_hw_ste_arr_sz);
364 struct mlx5dr_match_spec {
365 u32 smac_47_16; /* Source MAC address of incoming packet */
366 /* Incoming packet Ethertype - this is the Ethertype
367 * following the last VLAN tag of the packet
370 u32 smac_15_0:16; /* Source MAC address of incoming packet */
371 u32 dmac_47_16; /* Destination MAC address of incoming packet */
372 /* VLAN ID of first VLAN tag in the incoming packet.
373 * Valid only when cvlan_tag==1 or svlan_tag==1
376 /* CFI bit of first VLAN tag in the incoming packet.
377 * Valid only when cvlan_tag==1 or svlan_tag==1
380 /* Priority of first VLAN tag in the incoming packet.
381 * Valid only when cvlan_tag==1 or svlan_tag==1
384 u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
385 /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
386 * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
389 u32 ip_version:4; /* IP version */
390 u32 frag:1; /* Packet is an IP fragment */
391 /* The first vlan in the packet is s-vlan (0x8a88).
392 * cvlan_tag and svlan_tag cannot be set together
395 /* The first vlan in the packet is c-vlan (0x8100).
396 * cvlan_tag and svlan_tag cannot be set together
399 /* Explicit Congestion Notification derived from
400 * Traffic Class/TOS field of IPv6/v4
403 /* Differentiated Services Code Point derived from
404 * Traffic Class/TOS field of IPv6/v4
407 u32 ip_protocol:8; /* IP protocol */
408 /* TCP destination port.
409 * tcp and udp sport/dport are mutually exclusive
412 /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
416 /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
418 /* UDP source port.;tcp and udp sport/dport are mutually exclusive */
420 /* IPv6 source address of incoming packets
421 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
422 * This field should be qualified by an appropriate ethertype
425 /* IPv6 source address of incoming packets
426 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
427 * This field should be qualified by an appropriate ethertype
430 /* IPv6 source address of incoming packets
431 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
432 * This field should be qualified by an appropriate ethertype
435 /* IPv6 source address of incoming packets
436 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
437 * This field should be qualified by an appropriate ethertype
440 /* IPv6 destination address of incoming packets
441 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
442 * This field should be qualified by an appropriate ethertype
445 /* IPv6 destination address of incoming packets
446 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
447 * This field should be qualified by an appropriate ethertype
450 /* IPv6 destination address of incoming packets
451 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
452 * This field should be qualified by an appropriate ethertype
455 /* IPv6 destination address of incoming packets
456 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
457 * This field should be qualified by an appropriate ethertype
462 struct mlx5dr_match_misc {
463 u32 source_sqn:24; /* Source SQN */
464 u32 source_vhca_port:4;
465 /* used with GRE, sequence number exist when gre_s_present == 1 */
467 /* used with GRE, key exist when gre_k_present == 1 */
469 u32 reserved_auto1:1;
470 /* used with GRE, checksum exist when gre_c_present == 1 */
472 /* Source port.;0xffff determines wire port */
474 u32 source_eswitch_owner_vhca_id:16;
475 /* VLAN ID of first VLAN tag the inner header of the incoming packet.
476 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
478 u32 inner_second_vid:12;
479 /* CFI bit of first VLAN tag in the inner header of the incoming packet.
480 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
482 u32 inner_second_cfi:1;
483 /* Priority of second VLAN tag in the inner header of the incoming packet.
484 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
486 u32 inner_second_prio:3;
487 /* VLAN ID of first VLAN tag the outer header of the incoming packet.
488 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
490 u32 outer_second_vid:12;
491 /* CFI bit of first VLAN tag in the outer header of the incoming packet.
492 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
494 u32 outer_second_cfi:1;
495 /* Priority of second VLAN tag in the outer header of the incoming packet.
496 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
498 u32 outer_second_prio:3;
499 u32 gre_protocol:16; /* GRE Protocol (outer) */
500 u32 reserved_auto3:12;
501 /* The second vlan in the inner header of the packet is s-vlan (0x8a88).
502 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
504 u32 inner_second_svlan_tag:1;
505 /* The second vlan in the outer header of the packet is s-vlan (0x8a88).
506 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
508 u32 outer_second_svlan_tag:1;
509 /* The second vlan in the inner header of the packet is c-vlan (0x8100).
510 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
512 u32 inner_second_cvlan_tag:1;
513 /* The second vlan in the outer header of the packet is c-vlan (0x8100).
514 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
516 u32 outer_second_cvlan_tag:1;
517 u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
518 u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
519 u32 reserved_auto4:8;
520 u32 vxlan_vni:24; /* VXLAN VNI (outer) */
521 u32 geneve_oam:1; /* GENEVE OAM field (outer) */
522 u32 reserved_auto5:7;
523 u32 geneve_vni:24; /* GENEVE VNI field (outer) */
524 u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
525 u32 reserved_auto6:12;
526 u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
527 u32 reserved_auto7:12;
528 u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
529 u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
530 u32 reserved_auto8:10;
531 u32 bth_dst_qp:24; /* Destination QP in BTH header */
532 u32 reserved_auto9:8;
533 u8 reserved_auto10[20];
536 struct mlx5dr_match_misc2 {
537 u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
538 u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
539 u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
540 u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
541 u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
542 u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
543 u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
544 u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
545 u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
546 u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
547 u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
548 u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
549 u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
550 u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
551 u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
552 u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
553 u32 metadata_reg_c_7; /* metadata_reg_c_7 */
554 u32 metadata_reg_c_6; /* metadata_reg_c_6 */
555 u32 metadata_reg_c_5; /* metadata_reg_c_5 */
556 u32 metadata_reg_c_4; /* metadata_reg_c_4 */
557 u32 metadata_reg_c_3; /* metadata_reg_c_3 */
558 u32 metadata_reg_c_2; /* metadata_reg_c_2 */
559 u32 metadata_reg_c_1; /* metadata_reg_c_1 */
560 u32 metadata_reg_c_0; /* metadata_reg_c_0 */
561 u32 metadata_reg_a; /* metadata_reg_a */
562 u8 reserved_auto2[12];
565 struct mlx5dr_match_misc3 {
566 u32 inner_tcp_seq_num;
567 u32 outer_tcp_seq_num;
568 u32 inner_tcp_ack_num;
569 u32 outer_tcp_ack_num;
570 u32 outer_vxlan_gpe_vni:24;
571 u32 reserved_auto1:8;
572 u32 reserved_auto2:16;
573 u32 outer_vxlan_gpe_flags:8;
574 u32 outer_vxlan_gpe_next_protocol:8;
575 u32 icmpv4_header_data;
576 u32 icmpv6_header_data;
581 u8 reserved_auto3[0x1c];
584 struct mlx5dr_match_param {
585 struct mlx5dr_match_spec outer;
586 struct mlx5dr_match_misc misc;
587 struct mlx5dr_match_spec inner;
588 struct mlx5dr_match_misc2 misc2;
589 struct mlx5dr_match_misc3 misc3;
/* True when the mask touches any ICMPv4 field (type, code or header data) */
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
				       (_misc3)->icmpv4_code || \
				       (_misc3)->icmpv4_header_data)
/* Per-eswitch default ICM addresses (drop and uplink, per direction) */
struct mlx5dr_esw_caps {
	u64 drop_icm_address_rx;
	u64 drop_icm_address_tx;
	u64 uplink_icm_address_rx;
	u64 uplink_icm_address_tx;
604 struct mlx5dr_cmd_vport_cap {
612 struct mlx5dr_cmd_caps {
614 u64 nic_rx_drop_address;
615 u64 nic_tx_drop_address;
616 u64 nic_tx_allow_address;
617 u64 esw_rx_drop_address;
618 u64 esw_tx_drop_address;
620 u64 hdr_modify_icm_addr;
622 u8 flex_parser_id_icmp_dw0;
623 u8 flex_parser_id_icmp_dw1;
624 u8 flex_parser_id_icmpv6_dw0;
625 u8 flex_parser_id_icmpv6_dw1;
627 u16 roce_min_src_udp;
630 bool eswitch_manager;
635 struct mlx5dr_esw_caps esw_caps;
636 struct mlx5dr_cmd_vport_cap *vports_caps;
637 bool prio_tag_required;
640 struct mlx5dr_domain_rx_tx {
642 u64 default_icm_addr;
643 enum mlx5dr_ste_entry_type ste_type;
644 struct mutex mutex; /* protect rx/tx domain */
647 struct mlx5dr_domain_info {
648 bool supp_sw_steering;
651 u32 max_log_sw_icm_sz;
652 u32 max_log_action_icm_sz;
653 struct mlx5dr_domain_rx_tx rx;
654 struct mlx5dr_domain_rx_tx tx;
655 struct mlx5dr_cmd_caps caps;
658 struct mlx5dr_domain_cache {
659 struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
662 struct mlx5dr_domain {
663 struct mlx5dr_domain *peer_dmn;
664 struct mlx5_core_dev *mdev;
666 struct mlx5_uars_page *uar;
667 enum mlx5dr_domain_type type;
669 struct mlx5dr_icm_pool *ste_icm_pool;
670 struct mlx5dr_icm_pool *action_icm_pool;
671 struct mlx5dr_send_ring *send_ring;
672 struct mlx5dr_domain_info info;
673 struct mlx5dr_domain_cache cache;
676 struct mlx5dr_table_rx_tx {
677 struct mlx5dr_ste_htbl *s_anchor;
678 struct mlx5dr_domain_rx_tx *nic_dmn;
679 u64 default_icm_addr;
682 struct mlx5dr_table {
683 struct mlx5dr_domain *dmn;
684 struct mlx5dr_table_rx_tx rx;
685 struct mlx5dr_table_rx_tx tx;
690 struct list_head matcher_list;
691 struct mlx5dr_action *miss_action;
695 struct mlx5dr_matcher_rx_tx {
696 struct mlx5dr_ste_htbl *s_htbl;
697 struct mlx5dr_ste_htbl *e_anchor;
698 struct mlx5dr_ste_build *ste_builder;
699 struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
703 u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
704 u64 default_icm_addr;
705 struct mlx5dr_table_rx_tx *nic_tbl;
708 struct mlx5dr_matcher {
709 struct mlx5dr_table *tbl;
710 struct mlx5dr_matcher_rx_tx rx;
711 struct mlx5dr_matcher_rx_tx tx;
712 struct list_head matcher_list;
714 struct mlx5dr_match_param mask;
717 struct mlx5dv_flow_matcher *dv_matcher;
720 struct mlx5dr_rule_member {
721 struct mlx5dr_ste *ste;
722 /* attached to mlx5dr_rule via this */
723 struct list_head list;
724 /* attached to mlx5dr_ste via this */
725 struct list_head use_ste_list;
728 struct mlx5dr_action {
729 enum mlx5dr_action_type action_type;
733 struct mlx5dr_domain *dmn;
734 struct mlx5dr_icm_chunk *chunk;
743 struct mlx5dr_domain *dmn;
750 struct mlx5dr_table *tbl;
752 struct mlx5dr_domain *dmn;
755 enum fs_flow_table_type type;
758 struct mlx5dr_action **ref_actions;
759 u32 num_of_ref_actions;
768 struct mlx5dr_domain *dmn;
769 struct mlx5dr_cmd_vport_cap *caps;
772 u32 vlan_hdr; /* tpid_pcp_dei_vid */
778 enum mlx5dr_connect_type {
783 struct mlx5dr_htbl_connect_info {
784 enum mlx5dr_connect_type type;
786 struct mlx5dr_ste_htbl *hit_next_htbl;
791 struct mlx5dr_rule_rx_tx {
792 struct list_head rule_members_list;
793 struct mlx5dr_matcher_rx_tx *nic_matcher;
797 struct mlx5dr_matcher *matcher;
798 struct mlx5dr_rule_rx_tx rx;
799 struct mlx5dr_rule_rx_tx tx;
800 struct list_head rule_actions_list;
804 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
805 struct mlx5dr_ste *ste);
807 struct mlx5dr_icm_chunk {
808 struct mlx5dr_icm_buddy_mem *buddy_mem;
809 struct list_head chunk_list;
816 /* indicates the index of this chunk in the whole memory,
817 * used for deleting the chunk from the buddy
821 /* Memory optimisation */
822 struct mlx5dr_ste *ste_arr;
824 struct list_head *miss_list;
827 static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
829 mutex_lock(&nic_dmn->mutex);
832 static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
834 mutex_unlock(&nic_dmn->mutex);
837 static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
839 mlx5dr_domain_nic_lock(&dmn->info.rx);
840 mlx5dr_domain_nic_lock(&dmn->info.tx);
843 static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
845 mlx5dr_domain_nic_unlock(&dmn->info.tx);
846 mlx5dr_domain_nic_unlock(&dmn->info.rx);
849 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
850 struct mlx5dr_matcher_rx_tx *nic_matcher,
851 enum mlx5dr_ipv outer_ipv,
852 enum mlx5dr_ipv inner_ipv);
855 mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
857 if (icm_type == DR_ICM_TYPE_STE)
860 return DR_MODIFY_ACTION_SIZE;
864 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
866 return 1 << chunk_size;
870 mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
871 enum mlx5dr_icm_type icm_type)
876 entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
877 num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
879 return entry_size * num_of_entries;
882 static inline struct mlx5dr_cmd_vport_cap *
883 mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
885 if (!caps->vports_caps ||
886 (vport >= caps->num_vports && vport != WIRE_PORT))
889 if (vport == WIRE_PORT)
890 vport = caps->num_vports;
892 return &caps->vports_caps[vport];
895 struct mlx5dr_cmd_query_flow_table_details {
898 u64 sw_owner_icm_root_1;
899 u64 sw_owner_icm_root_0;
902 struct mlx5dr_cmd_create_flow_table_attr {
913 /* internal API functions */
914 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
915 struct mlx5dr_cmd_caps *caps);
916 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
917 bool other_vport, u16 vport_number,
919 u64 *icm_address_tx);
920 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
921 bool other_vport, u16 vport_number, u16 *gvmi);
922 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
923 struct mlx5dr_esw_caps *caps);
924 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
925 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
929 u32 modify_header_id,
931 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
934 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
938 u32 *modify_header_id);
939 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
940 u32 modify_header_id);
941 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
945 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
949 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
950 struct mlx5dr_cmd_create_flow_table_attr *attr,
951 u64 *fdb_rx_icm_addr,
953 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
956 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
957 enum fs_flow_table_type type,
959 struct mlx5dr_cmd_query_flow_table_details *output);
960 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
961 enum mlx5_reformat_ctx_type rt,
962 size_t reformat_size,
965 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
968 struct mlx5dr_cmd_gid_attr {
974 struct mlx5dr_cmd_qp_create_attr {
987 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
988 u16 index, struct mlx5dr_cmd_gid_attr *attr);
990 struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
991 enum mlx5dr_icm_type icm_type);
992 void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
994 struct mlx5dr_icm_chunk *
995 mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
996 enum mlx5dr_icm_chunk_size chunk_size);
997 void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
998 int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
999 struct mlx5dr_domain_rx_tx *nic_dmn,
1000 struct mlx5dr_ste_htbl *htbl,
1001 struct mlx5dr_htbl_connect_info *connect_info,
1002 bool update_hw_ste);
1003 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
1004 struct mlx5dr_domain_rx_tx *nic_dmn,
1005 struct mlx5dr_ste_htbl *htbl,
1007 struct mlx5dr_htbl_connect_info *connect_info);
1008 void mlx5dr_ste_copy_param(u8 match_criteria,
1009 struct mlx5dr_match_param *set_param,
1010 struct mlx5dr_match_parameters *mask);
1013 struct mlx5_core_dev *mdev;
1014 struct mlx5_wq_qp wq;
1015 struct mlx5_uars_page *uar;
1016 struct mlx5_wq_ctrl wq_ctrl;
1022 unsigned int *wqe_head;
1023 unsigned int wqe_cnt;
1029 unsigned int wqe_cnt;
1031 int max_inline_data;
1035 struct mlx5_core_dev *mdev;
1036 struct mlx5_cqwq wq;
1037 struct mlx5_wq_ctrl wq_ctrl;
1038 struct mlx5_core_cq mcq;
1039 struct mlx5dr_qp *qp;
1043 struct mlx5_core_dev *mdev;
1044 struct mlx5_core_mkey mkey;
1045 dma_addr_t dma_addr;
1050 #define MAX_SEND_CQE 64
1051 #define MIN_READ_SYNC 64
1053 struct mlx5dr_send_ring {
1054 struct mlx5dr_cq *cq;
1055 struct mlx5dr_qp *qp;
1056 struct mlx5dr_mr *mr;
1057 /* How much wqes are waiting for completion */
1059 /* Signal request per this trash hold value */
1061 /* Each post_send_size less than max_post_send_size */
1062 u32 max_post_send_size;
1063 /* manage the send queue */
1067 struct ib_wc wc[MAX_SEND_CQE];
1068 u8 sync_buff[MIN_READ_SYNC];
1069 struct mlx5dr_mr *sync_mr;
1070 spinlock_t lock; /* Protect the data path of the send ring */
1073 int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
1074 void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
1075 struct mlx5dr_send_ring *send_ring);
1076 int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
1077 int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
1078 struct mlx5dr_ste *ste,
1082 int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
1083 struct mlx5dr_ste_htbl *htbl,
1084 u8 *formatted_ste, u8 *mask);
1085 int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
1086 struct mlx5dr_ste_htbl *htbl,
1088 bool update_hw_ste);
1089 int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
1090 struct mlx5dr_action *action);
1092 struct mlx5dr_cmd_ft_info {
1095 enum fs_flow_table_type type;
1098 struct mlx5dr_cmd_flow_destination_hw_info {
1099 enum mlx5_flow_destination_type type;
1114 struct mlx5dr_cmd_fte_info {
1117 struct mlx5_flow_context flow_context;
1119 struct mlx5_flow_act action;
1120 struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
1123 int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
1124 int opmod, int modify_mask,
1125 struct mlx5dr_cmd_ft_info *ft,
1127 struct mlx5dr_cmd_fte_info *fte);
1129 struct mlx5dr_fw_recalc_cs_ft {
1136 struct mlx5dr_fw_recalc_cs_ft *
1137 mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
1138 void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
1139 struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
1140 int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
1143 int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
1144 struct mlx5dr_cmd_flow_destination_hw_info *dest,
1149 void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
1151 #endif /* _DR_TYPES_H_ */