1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
4 #include <linux/types.h>
5 #include <linux/crc32.h>
/* Reflected CRC32 (Ethernet) polynomial.
 * NOTE(review): appears unused in the visible code — dr_ste_crc32_calc()
 * below uses the kernel crc32() library helper instead; confirm before removal.
 */
8 #define DR_STE_CRC_POLY 0xEDB88320L
/* IP version nibble values as carried in the match spec ip_version field */
14 #define IP_VERSION_IPV4 0x4
15 #define IP_VERSION_IPV6 0x6
/* Bit OR-ed into qp_list_pointer to mark the flow tag as enabled
 * (see mlx5dr_ste_rx_set_flow_tag()).
 */
19 #define DR_STE_ENABLE_FLOW_TAG BIT(31)
21 /* Set to STE a specific value using DR_STE_SET */
/* Core setter: writes 'value' into the STE field t_fname, but only when the
 * corresponding match-spec field s_fname is non-zero. Side effect: clears
 * (spec)->s_fname so later builders see the field as already consumed.
 */
22 #define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
23 if ((spec)->s_fname) { \
24 MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
25 (spec)->s_fname = 0; \
29 /* Set to STE spec->s_fname to tag->t_fname */
30 #define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
31 DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
33 /* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */
34 #define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
35 DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
37 /* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */
38 #define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
39 DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
/* Expand the packed tcp_flags bitfield of the match spec into the individual
 * one-bit STE fields. Bit positions follow the TCP header flag layout:
 * bit 8 = NS, bit 7 = CWR, ... bit 1 = SYN, bit 0 = FIN.
 */
41 #define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
42 MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
43 MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
44 MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
45 MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
46 MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
47 MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
48 MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
49 MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
50 MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
/* Write the four first-MPLS-header mask fields (label/s_bos/exp/ttl) of the
 * given in_out (inner/outer) direction into the STE bit mask. Note the same
 * 'mask' struct is used as both destination buffer and source spec, so the
 * consumed spec fields are zeroed by DR_STE_SET_MASK_V as a side effect.
 */
53 #define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
54 DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
55 in_out##_first_mpls_label);\
56 DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
57 in_out##_first_mpls_s_bos); \
58 DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
59 in_out##_first_mpls_exp); \
60 DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
61 in_out##_first_mpls_ttl); \
/* Same as above but writes the concrete match values into the STE tag */
64 #define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
65 DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
66 in_out##_first_mpls_label);\
67 DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
68 in_out##_first_mpls_s_bos); \
69 DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
70 in_out##_first_mpls_exp); \
71 DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
72 in_out##_first_mpls_ttl); \
/* True if any outer first-MPLS-over-GRE field is requested by the mask */
75 #define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
76 (_misc)->outer_first_mpls_over_gre_label || \
77 (_misc)->outer_first_mpls_over_gre_exp || \
78 (_misc)->outer_first_mpls_over_gre_s_bos || \
79 (_misc)->outer_first_mpls_over_gre_ttl)
/* True if any outer first-MPLS-over-UDP field is requested by the mask */
80 #define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
81 (_misc)->outer_first_mpls_over_udp_label || \
82 (_misc)->outer_first_mpls_over_udp_exp || \
83 (_misc)->outer_first_mpls_over_udp_s_bos || \
84 (_misc)->outer_first_mpls_over_udp_ttl)
/* Select the lookup-type variant: _I for inner headers, else _D (RX) or
 * _O (TX) for outer headers, based on the rx flag.
 */
86 #define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
87 ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
88 (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
89 MLX5DR_STE_LU_TYPE_##lookup_type##_O)
/* Tunneling-action encodings programmed into the STE tunneling_action field.
 * NOTE(review): values presumably match the device's STE format — confirm
 * against the PRM before changing.
 */
91 enum dr_ste_tunl_action {
92 DR_STE_TUNL_ACTION_NONE = 0,
93 DR_STE_TUNL_ACTION_ENABLE = 1,
94 DR_STE_TUNL_ACTION_DECAP = 2,
95 DR_STE_TUNL_ACTION_L3_DECAP = 3,
96 DR_STE_TUNL_ACTION_POP_VLAN = 4,
/* TX action-type encodings programmed into the STE action_type field */
99 enum dr_ste_action_type {
100 DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
101 DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
102 DR_STE_ACTION_TYPE_ENCAP = 4,
/* In-memory layout of a hardware STE: control words, then the match tag,
 * then the bit mask (the mask part exists only in the full-size STE).
 */
105 struct dr_hw_ste_format {
106 u8 ctrl[DR_STE_SIZE_CTRL];
107 u8 tag[DR_STE_SIZE_TAG];
108 u8 mask[DR_STE_SIZE_MASK];
/* Thin wrapper around the kernel crc32() library over an arbitrary buffer */
111 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
113 u32 crc = crc32(0, input_data, length);
/* Compute the hash-table bucket for an STE: mask the tag bytes by the
 * table's byte_mask (one mask bit per tag byte, MSB-first), CRC32 the
 * masked tag, and reduce modulo the table size (power of two, via AND).
 * Tables of a single entry or with an empty mask skip the CRC entirely.
 */
118 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
120 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
121 u8 masked[DR_STE_SIZE_TAG] = {};
126 /* Don't calculate CRC if the result is predicted */
127 if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
130 /* Mask tag using byte mask, bit per byte */
131 bit = 1 << (DR_STE_SIZE_TAG - 1);
132 for (i = 0; i < DR_STE_SIZE_TAG; i++) {
133 if (htbl->byte_mask & bit)
134 masked[i] = hw_ste->tag[i];
139 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
140 index = crc32 & (htbl->chunk->num_of_entries - 1);
/* Compress the per-byte bit mask: one output bit per mask byte, set when
 * the whole byte is 0xff (fully masked).
 */
145 static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
150 for (i = 0; i < DR_STE_SIZE_MASK; i++) {
151 byte_mask = byte_mask << 1;
152 if (bit_mask[i] == 0xff)
/* Copy the caller-provided bit mask into the mask area of a full-size STE */
158 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
160 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
162 memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
/* Program an RX flow tag; DR_STE_ENABLE_FLOW_TAG (bit 31) marks it valid */
165 void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
167 MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
168 DR_STE_ENABLE_FLOW_TAG | flow_tag);
/* Attach a flow counter: the 24-bit counter id is split across two fields */
171 void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
173 /* This can be used for both rx_steering_mult and for sx_transmit */
174 MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
175 MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
/* Set the TX 'go back' bit (see HW workaround note in set_tx_push_vlan) */
178 void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
180 MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
/* Program a TX push-VLAN action with the given inline VLAN header */
183 void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
186 MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
187 DR_STE_ACTION_TYPE_PUSH_VLAN);
188 MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
189 /* Due to HW limitation we need to set this bit, otherwise reforamt +
190 * push vlan will not work.
193 mlx5dr_ste_set_go_back_bit(hw_ste_p);
/* Program a TX encap (L2 or L3) action pointing at a reformat resource */
196 void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
198 MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
199 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
200 /* The hardware expects here size in words (2 byte) */
201 MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
202 MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
/* Program an RX L2 tunnel decap action */
205 void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
207 MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
208 DR_STE_TUNL_ACTION_DECAP);
/* Program an RX pop-VLAN action */
211 void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
213 MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
214 DR_STE_TUNL_ACTION_POP_VLAN);
/* Program an RX L3 tunnel decap; action_description flags a VLAN-tagged
 * inner packet.
 */
217 void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
219 MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
220 DR_STE_TUNL_ACTION_L3_DECAP);
221 MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
/* Accessors for the STE entry_type control field */
224 void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
226 MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
229 u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
231 return MLX5_GET(ste_general, hw_ste_p, entry_type);
/* Program a header-rewrite (modify-packet) action: number of actions and a
 * pointer to the pre-written action list.
 */
234 void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
237 MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
239 MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
/* Override only the GVMI encoded in the hit (next table) address */
243 void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
245 MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
/* Initialize a fresh STE: entry/lookup types, a don't-care next lookup,
 * and the owner GVMI in all three places that encode it.
 */
248 void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
251 MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
252 MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
253 MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
255 /* Set GVMI once, this is the same for RX/TX
256 * bits 63_48 of next table base / miss address encode the next GVMI
258 MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
259 MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
260 MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
/* Make the STE match everything: zero tag and mask so every packet hits */
263 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
265 memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
266 memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
/* Make the STE match nothing; 0xdc in the first tag byte can never match
 * since the mask is left clear. NOTE(review): value choice inferred — the
 * enclosing context is not fully visible here.
 */
269 static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
271 hw_ste->tag[0] = 0xdc;
/* Reassemble the 40-bit miss address (bits 6..45) from its two STE fields */
275 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
278 (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
279 MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
/* Encode the hit (next table) address: ICM address shifted by 5 combined
 * with the table size, split across the two next_table_base fields.
 */
284 void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
286 u64 index = (icm_addr >> 5) | ht_size;
288 MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
289 MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
/* Per-STE ICM/MR addresses: base of the owning chunk plus the STE's slot */
292 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
294 u32 index = ste - ste->htbl->ste_arr;
296 return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
299 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
301 u32 index = ste - ste->htbl->ste_arr;
303 return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
/* Miss list head for this STE's slot in the owning hash table */
306 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
308 u32 index = ste - ste->htbl->ste_arr;
310 return &ste->htbl->miss_list[index];
/* Point this STE unconditionally at next_htbl: copy the target table's
 * byte mask / lookup type / hit address, then clear tag+mask so it always
 * hits.
 */
313 static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
314 struct mlx5dr_ste_htbl *next_htbl)
316 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
317 u8 *hw_ste = ste->hw_ste;
319 MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
320 MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
321 mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
323 dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
/* An STE is last in its rule when its chain location equals the number of
 * STE builders for this matcher.
 */
326 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
329 return ste_location == nic_matcher->num_of_builders;
332 /* Replace relevant fields, except of:
333 * htbl - keep the origin htbl
334 * miss_list + list - already took the src from the list.
335 * icm_addr/mr_addr - depends on the hosting table.
338 * | a | -> | b | -> | c | ->
342 * While the data that was in b copied to a.
/* Copy src's hardware image (reduced size — no mask area), next-table link,
 * refcount and rule list into dst; re-point the next table back at dst.
 */
344 static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
346 memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
347 dst->next_htbl = src->next_htbl;
349 dst->next_htbl->pointing_ste = dst;
351 dst->refcount = src->refcount;
353 INIT_LIST_HEAD(&dst->rule_list);
354 list_splice_tail_init(&src->rule_list, &dst->rule_list);
357 /* Free ste which is the head and the only one in miss_list */
359 dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
360 struct mlx5dr_matcher_rx_tx *nic_matcher,
361 struct mlx5dr_ste_send_info *ste_info_head,
362 struct list_head *send_ste_list,
363 struct mlx5dr_ste_htbl *stats_tbl)
365 u8 tmp_data_ste[DR_STE_SIZE] = {};
366 struct mlx5dr_ste tmp_ste = {};
/* Build the always-miss image in a stack copy first: ste->hw_ste is only
 * DR_STE_SIZE_REDUCED and lacks the bit-mask area that
 * mlx5dr_ste_always_miss_addr() writes.
 */
369 tmp_ste.hw_ste = tmp_data_ste;
371 /* Use temp ste because dr_ste_always_miss_addr
372 * touches bit_mask area which doesn't exist at ste->hw_ste.
374 memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
375 miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
376 mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
377 memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
379 list_del_init(&ste->miss_list_node);
381 /* Write full STE size in order to have "always_miss" */
382 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
386 true /* Copy data */);
/* Account the freed entry on the table that hosts the miss list head */
388 stats_tbl->ctrl.num_of_valid_entries--;
391 /* Free ste which is the head but NOT the only one in miss_list:
392 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
395 dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
396 struct mlx5dr_ste_send_info *ste_info_head,
397 struct list_head *send_ste_list,
398 struct mlx5dr_ste_htbl *stats_tbl)
401 struct mlx5dr_ste_htbl *next_miss_htbl;
/* Remember next_ste's hosting table before its contents move into ste */
403 next_miss_htbl = next_ste->htbl;
405 /* Remove from the miss_list the next_ste before copy */
406 list_del_init(&next_ste->miss_list_node);
408 /* All rule-members that use next_ste should know about that */
409 mlx5dr_rule_update_rule_member(next_ste, ste);
411 /* Move data from next into ste */
412 dr_ste_replace(ste, next_ste);
414 /* Del the htbl that contains the next_ste.
415 * The origin htbl stay with the same number of entries.
417 mlx5dr_htbl_put(next_miss_htbl);
/* Queue the updated head (reduced size — mask unchanged) for HW write */
419 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
423 true /* Copy data */);
425 stats_tbl->ctrl.num_of_collisions--;
426 stats_tbl->ctrl.num_of_valid_entries--;
429 /* Free ste that is located in the middle of the miss list:
430 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
432 static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
433 struct mlx5dr_ste_send_info *ste_info,
434 struct list_head *send_ste_list,
435 struct mlx5dr_ste_htbl *stats_tbl)
437 struct mlx5dr_ste *prev_ste;
/* A middle entry must have a predecessor; bail out loudly if not */
440 prev_ste = list_prev_entry(ste, miss_list_node);
441 if (WARN_ON(!prev_ste))
/* Unlink in HW: make prev miss directly to ste's miss target */
444 miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
445 mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
447 mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
448 prev_ste->hw_ste, ste_info,
449 send_ste_list, true /* Copy data*/);
/* Unlink in SW after the HW update has been queued */
451 list_del_init(&ste->miss_list_node);
453 stats_tbl->ctrl.num_of_valid_entries--;
454 stats_tbl->ctrl.num_of_collisions--;
/* Release an STE, choosing the removal strategy by its position in the
 * miss list (sole head / head with successors / middle), then flush all
 * queued HW writes and drop the origin table reference when appropriate.
 */
457 void mlx5dr_ste_free(struct mlx5dr_ste *ste,
458 struct mlx5dr_matcher *matcher,
459 struct mlx5dr_matcher_rx_tx *nic_matcher)
461 struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
462 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
463 struct mlx5dr_ste_send_info ste_info_head;
464 struct mlx5dr_ste *next_ste, *first_ste;
465 bool put_on_origin_table = true;
466 struct mlx5dr_ste_htbl *stats_tbl;
467 LIST_HEAD(send_ste_list);
/* Statistics are accounted on the table hosting the miss list head */
469 first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
470 struct mlx5dr_ste, miss_list_node);
471 stats_tbl = first_ste->htbl;
475 * a. head ste is the only ste in the miss list
476 * b. head ste is not the only ste in the miss-list
479 if (first_ste == ste) { /* Ste is the head */
480 struct mlx5dr_ste *last_ste;
482 last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
483 struct mlx5dr_ste, miss_list_node);
484 if (last_ste == first_ste)
487 next_ste = list_next_entry(ste, miss_list_node);
490 /* One and only entry in the list */
491 dr_ste_remove_head_ste(ste, nic_matcher,
496 /* First but not only entry in the list */
497 dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
498 &send_ste_list, stats_tbl);
499 put_on_origin_table = false;
501 } else { /* Ste in the middle of the list */
502 dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
/* Flush every queued HW update accumulated by the helpers above */
506 list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
507 &send_ste_list, send_list) {
508 list_del(&cur_ste_info->send_list);
509 mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
510 cur_ste_info->data, cur_ste_info->size,
511 cur_ste_info->offset);
/* replace_head moved another STE's identity here; it keeps the table ref */
514 if (put_on_origin_table)
515 mlx5dr_htbl_put(ste->htbl);
/* Two STEs are equal when their match tags are byte-identical */
518 bool mlx5dr_ste_equal_tag(void *src, void *dst)
520 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
521 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
523 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
/* Convenience wrapper: hit address taken from the next table's chunk */
526 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
527 struct mlx5dr_ste_htbl *next_htbl)
529 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
531 mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
/* Encode a 64-byte-aligned miss address into its two STE fields */
534 void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
536 u64 index = miss_addr >> 6;
538 /* Miss address for TX and RX STEs located in the same offsets */
539 MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
540 MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
/* Make the STE always miss to miss_addr (don't-care lookup + no-match tag) */
543 void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
545 u8 *hw_ste = ste->hw_ste;
547 MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
548 mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
549 dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
552 /* The assumption here is that we don't update the ste->hw_ste if it is not
553 * used ste, so it will be all zero, checking the next_lu_type.
555 bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
557 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
559 if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
560 MLX5DR_STE_LU_TYPE_NOP)
/* An STE with no references is free for reuse */
566 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
568 return !ste->refcount;
571 /* Init one ste as a pattern for ste data array */
572 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
573 struct mlx5dr_domain_rx_tx *nic_dmn,
574 struct mlx5dr_ste_htbl *htbl,
576 struct mlx5dr_htbl_connect_info *connect_info)
/* Temporary ste only carries the hw_ste pointer for the helpers below */
578 struct mlx5dr_ste ste = {};
580 mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
581 ste.hw_ste = formatted_ste;
/* Pattern either always-hits the connected table or always-misses to the
 * provided ICM address.
 */
583 if (connect_info->type == CONNECT_HIT)
584 dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
586 mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
/* Build the pattern STE and post the whole formatted table to the device */
589 int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
590 struct mlx5dr_domain_rx_tx *nic_dmn,
591 struct mlx5dr_ste_htbl *htbl,
592 struct mlx5dr_htbl_connect_info *connect_info,
595 u8 formatted_ste[DR_STE_SIZE] = {};
597 mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
603 return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
/* Allocate and connect the next-hop hash table for a non-final STE in a
 * rule chain: the new table misses to the matcher's end anchor, and the
 * current STE's hit address is pointed at it. Returns 0 on success.
 */
606 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
607 struct mlx5dr_matcher_rx_tx *nic_matcher,
608 struct mlx5dr_ste *ste,
610 enum mlx5dr_icm_chunk_size log_table_size)
612 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
613 struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
614 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
615 struct mlx5dr_htbl_connect_info info;
616 struct mlx5dr_ste_htbl *next_htbl;
/* Only non-final STEs get a next table */
618 if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
/* The next table inherits the lookup type / byte mask already encoded in
 * the current STE's control words.
 */
622 next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
623 byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
625 next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
630 mlx5dr_dbg(dmn, "Failed allocating table\n");
634 /* Write new table to HW */
635 info.type = CONNECT_MISS;
636 info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
637 if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
639 mlx5dr_info(dmn, "Failed writing table to HW\n");
/* Link SW and HW views of the chain */
643 mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
644 ste->next_htbl = next_htbl;
645 next_htbl->pointing_ste = ste;
/* Error path: release the freshly allocated table */
651 mlx5dr_ste_htbl_free(next_htbl);
/* Initialize table growth policy: tables at max chunk size (minus one,
 * reserved) or with no byte mask never grow; growth threshold is 50% of
 * capacity (rounded up so a 1-entry table has threshold 1).
 */
655 static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
657 struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
660 htbl->ctrl.may_grow = true;
662 if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
663 htbl->ctrl.may_grow = false;
665 /* Threshold is 50%, one is added to table of size 1 */
666 num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
667 ctrl->increase_threshold = (num_of_entries + 1) / 2;
/* Allocate an STE hash table backed by an ICM chunk and initialize every
 * entry's hw_ste pointer and list heads. Returns NULL on failure.
 */
670 struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
671 enum mlx5dr_icm_chunk_size chunk_size,
672 u8 lu_type, u16 byte_mask)
674 struct mlx5dr_icm_chunk *chunk;
675 struct mlx5dr_ste_htbl *htbl;
678 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
682 chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
/* The chunk owns the ste/hw_ste/miss_list arrays; htbl just references */
687 htbl->lu_type = lu_type;
688 htbl->byte_mask = byte_mask;
689 htbl->ste_arr = chunk->ste_arr;
690 htbl->hw_ste_arr = chunk->hw_ste_arr;
691 htbl->miss_list = chunk->miss_list;
694 for (i = 0; i < chunk->num_of_entries; i++) {
695 struct mlx5dr_ste *ste = &htbl->ste_arr[i];
/* SW shadow STEs are reduced size — no mask area */
697 ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
700 INIT_LIST_HEAD(&ste->miss_list_node);
701 INIT_LIST_HEAD(&htbl->miss_list[i]);
702 INIT_LIST_HEAD(&ste->rule_list);
705 htbl->chunk_size = chunk_size;
706 dr_ste_set_ctrl(htbl);
/* Release a hash table and its backing ICM chunk */
714 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
719 mlx5dr_icm_free_chunk(htbl->chunk);
/* Reject mask combinations the device cannot express; currently a partial
 * (non-0xffff) source_port mask in the misc criteria is unsupported.
 */
724 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
726 struct mlx5dr_match_param *mask,
727 struct mlx5dr_match_param *value)
729 if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
730 if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
731 mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
/* Run every STE builder of the matcher over 'value', producing one
 * initialized + tagged STE per builder into ste_arr, and chain consecutive
 * STEs together via next_lu_type/byte_mask.
 */
739 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
740 struct mlx5dr_matcher_rx_tx *nic_matcher,
741 struct mlx5dr_match_param *value,
744 struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
745 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
746 struct mlx5dr_ste_build *sb;
749 ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
750 &matcher->mask, value);
754 sb = nic_matcher->ste_builder;
755 for (i = 0; i < nic_matcher->num_of_builders; i++) {
756 mlx5dr_ste_init(ste_arr,
759 dmn->info.caps.gvmi);
761 mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
/* Builder callback fills the tag from the match value */
763 ret = sb->ste_build_tag_func(value, sb, ste_arr);
767 /* Connect the STEs */
768 if (i < (nic_matcher->num_of_builders - 1)) {
769 /* Need the next builder for these fields,
770 * not relevant for the last ste in the chain.
773 MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
774 MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
776 ste_arr += DR_STE_SIZE;
/* Build the bit mask for the eth_l2_src_dst lookup from the L2 match mask:
 * DMAC, SMAC (repacked 48 bits -> 47_32/31_0 STE layout), first VLAN
 * id/cfi/prio, l3_type, and the first VLAN qualifier. Consumed mask fields
 * are zeroed. Setting both cvlan_tag and svlan_tag masks is rejected.
 */
781 static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
782 bool inner, u8 *bit_mask)
784 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
786 DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
787 DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
/* SMAC spans two STE fields with a different split than the spec's */
789 if (mask->smac_47_16 || mask->smac_15_0) {
790 MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
791 mask->smac_47_16 >> 16);
792 MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
793 mask->smac_47_16 << 16 | mask->smac_15_0);
794 mask->smac_47_16 = 0;
798 DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
799 DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
800 DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
801 DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
803 if (mask->cvlan_tag) {
804 MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
806 } else if (mask->svlan_tag) {
807 MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
/* Both c- and s-vlan left set means both were requested — unsupported */
811 if (mask->cvlan_tag || mask->svlan_tag) {
812 pr_info("Invalid c/svlan mask configuration\n");
/* Unpack the device-format fte_match_set_misc mask buffer into the SW
 * mlx5dr_match_misc struct, field by field via MLX5_GET.
 */
819 static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
821 spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
822 spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
823 spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
824 spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
825 spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
827 spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
828 spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
829 source_eswitch_owner_vhca_id);
831 spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
832 spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
833 spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
834 spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
835 spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
836 spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);
838 spec->outer_second_cvlan_tag =
839 MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
840 spec->inner_second_cvlan_tag =
841 MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
842 spec->outer_second_svlan_tag =
843 MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
844 spec->inner_second_svlan_tag =
845 MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);
847 spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);
849 spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
850 spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);
852 spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);
854 spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
855 spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);
857 spec->outer_ipv6_flow_label =
858 MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);
860 spec->inner_ipv6_flow_label =
861 MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);
863 spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
864 spec->geneve_protocol_type =
865 MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);
867 spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
/* Unpack the device-format fte_match_set_lyr_2_4 mask buffer into the SW
 * mlx5dr_match_spec struct; IPv6/IPv4 addresses are copied as raw bytes and
 * converted from big-endian into four host-order 32-bit words.
 */
870 static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
874 spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
876 spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
877 spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);
879 spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);
881 spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
882 spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
883 spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
884 spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);
886 spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
887 spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
888 spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
889 spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
890 spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
891 spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
892 spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
893 spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
894 spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
895 spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);
897 spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);
899 spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
900 spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);
902 memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
903 src_ipv4_src_ipv6.ipv6_layout.ipv6),
906 spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
907 spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
908 spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
909 spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
911 memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
912 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
915 spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
916 spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
917 spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
918 spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
/* Unpack the device-format fte_match_set_misc2 mask buffer (MPLS headers
 * and metadata registers) into the SW mlx5dr_match_misc2 struct.
 */
921 static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
923 spec->outer_first_mpls_label =
924 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
925 spec->outer_first_mpls_exp =
926 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
927 spec->outer_first_mpls_s_bos =
928 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
929 spec->outer_first_mpls_ttl =
930 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
931 spec->inner_first_mpls_label =
932 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
933 spec->inner_first_mpls_exp =
934 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
935 spec->inner_first_mpls_s_bos =
936 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
937 spec->inner_first_mpls_ttl =
938 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
939 spec->outer_first_mpls_over_gre_label =
940 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
941 spec->outer_first_mpls_over_gre_exp =
942 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
943 spec->outer_first_mpls_over_gre_s_bos =
944 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
945 spec->outer_first_mpls_over_gre_ttl =
946 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
947 spec->outer_first_mpls_over_udp_label =
948 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
949 spec->outer_first_mpls_over_udp_exp =
950 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
951 spec->outer_first_mpls_over_udp_s_bos =
952 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
953 spec->outer_first_mpls_over_udp_ttl =
954 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
955 spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
956 spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
957 spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
958 spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
959 spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
960 spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
961 spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
962 spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
963 spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
964 spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
/* Unpack the device-format fte_match_set_misc3 mask buffer (TCP seq/ack,
 * VXLAN-GPE, ICMP v4/v6) into the SW mlx5dr_match_misc3 struct.
 */
967 static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
969 spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
970 spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
971 spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
972 spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
973 spec->outer_vxlan_gpe_vni =
974 MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
975 spec->outer_vxlan_gpe_next_protocol =
976 MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
977 spec->outer_vxlan_gpe_flags =
978 MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
979 spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
980 spec->icmpv6_header_data =
981 MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
982 spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
983 spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
984 spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
985 spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
/* Walk the caller's packed match buffer section by section (outer, misc,
 * inner, misc2, misc3 — in criteria order) and unpack each selected section
 * into set_param. A section whose bytes extend past match_sz is first copied
 * into the zero-padded tail_param scratch buffer so the unpackers never read
 * beyond the caller's buffer.
 */
988 void mlx5dr_ste_copy_param(u8 match_criteria,
989 struct mlx5dr_match_param *set_param,
990 struct mlx5dr_match_parameters *mask)
992 u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
993 u8 *data = (u8 *)mask->match_buf;
994 size_t param_location;
997 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
998 if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
999 memcpy(tail_param, data, mask->match_sz);
1002 buff = mask->match_buf;
1004 dr_ste_copy_mask_spec(buff, &set_param->outer);
/* param_location always advances, selected or not, to keep the layout */
1006 param_location = sizeof(struct mlx5dr_match_spec);
1008 if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
1009 if (mask->match_sz < param_location +
1010 sizeof(struct mlx5dr_match_misc)) {
1011 memcpy(tail_param, data + param_location,
1012 mask->match_sz - param_location);
1015 buff = data + param_location;
1017 dr_ste_copy_mask_misc(buff, &set_param->misc);
1019 param_location += sizeof(struct mlx5dr_match_misc);
1021 if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
1022 if (mask->match_sz < param_location +
1023 sizeof(struct mlx5dr_match_spec)) {
1024 memcpy(tail_param, data + param_location,
1025 mask->match_sz - param_location);
1028 buff = data + param_location;
1030 dr_ste_copy_mask_spec(buff, &set_param->inner);
1032 param_location += sizeof(struct mlx5dr_match_spec);
1034 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
1035 if (mask->match_sz < param_location +
1036 sizeof(struct mlx5dr_match_misc2)) {
1037 memcpy(tail_param, data + param_location,
1038 mask->match_sz - param_location);
1041 buff = data + param_location;
1043 dr_ste_copy_mask_misc2(buff, &set_param->misc2);
1046 param_location += sizeof(struct mlx5dr_match_misc2);
1048 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
1049 if (mask->match_sz < param_location +
1050 sizeof(struct mlx5dr_match_misc3)) {
1051 memcpy(tail_param, data + param_location,
1052 mask->match_sz - param_location);
1055 buff = data + param_location;
1057 dr_ste_copy_mask_misc3(buff, &set_param->misc3);
1061 static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
1062 struct mlx5dr_ste_build *sb,
1065 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1066 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1067 u8 *tag = hw_ste->tag;
1069 DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
1070 DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
1072 if (spec->smac_47_16 || spec->smac_15_0) {
1073 MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
1074 spec->smac_47_16 >> 16);
1075 MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
1076 spec->smac_47_16 << 16 | spec->smac_15_0);
1077 spec->smac_47_16 = 0;
1078 spec->smac_15_0 = 0;
1081 if (spec->ip_version) {
1082 if (spec->ip_version == IP_VERSION_IPV4) {
1083 MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
1084 spec->ip_version = 0;
1085 } else if (spec->ip_version == IP_VERSION_IPV6) {
1086 MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
1087 spec->ip_version = 0;
1089 pr_info("Unsupported ip_version value\n");
1094 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
1095 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
1096 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
1098 if (spec->cvlan_tag) {
1099 MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
1100 spec->cvlan_tag = 0;
1101 } else if (spec->svlan_tag) {
1102 MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
1103 spec->svlan_tag = 0;
1108 int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
1109 struct mlx5dr_match_param *mask,
1110 bool inner, bool rx)
1114 ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
1120 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
1121 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1122 sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
1127 static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
1128 bool inner, u8 *bit_mask)
1130 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1132 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
1133 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
1134 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
1135 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
1138 static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
1139 struct mlx5dr_ste_build *sb,
1142 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1143 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1144 u8 *tag = hw_ste->tag;
1146 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
1147 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
1148 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
1149 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
1154 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
1155 struct mlx5dr_match_param *mask,
1156 bool inner, bool rx)
1158 dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
1162 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
1163 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1164 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
1167 static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
1168 bool inner, u8 *bit_mask)
1170 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1172 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
1173 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
1174 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
1175 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
1178 static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
1179 struct mlx5dr_ste_build *sb,
1182 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1183 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1184 u8 *tag = hw_ste->tag;
1186 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
1187 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
1188 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
1189 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
1194 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
1195 struct mlx5dr_match_param *mask,
1196 bool inner, bool rx)
1198 dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
1202 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
1203 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1204 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
1207 static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
1211 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1213 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1214 destination_address, mask, dst_ip_31_0);
1215 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1216 source_address, mask, src_ip_31_0);
1217 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1218 destination_port, mask, tcp_dport);
1219 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1220 destination_port, mask, udp_dport);
1221 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1222 source_port, mask, tcp_sport);
1223 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1224 source_port, mask, udp_sport);
1225 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1226 protocol, mask, ip_protocol);
1227 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1228 fragmented, mask, frag);
1229 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1230 dscp, mask, ip_dscp);
1231 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1234 if (mask->tcp_flags) {
1235 DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
1236 mask->tcp_flags = 0;
1240 static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
1241 struct mlx5dr_ste_build *sb,
1244 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1245 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1246 u8 *tag = hw_ste->tag;
1248 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
1249 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
1250 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
1251 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
1252 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
1253 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
1254 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
1255 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
1256 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
1257 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
1259 if (spec->tcp_flags) {
1260 DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
1261 spec->tcp_flags = 0;
1267 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
1268 struct mlx5dr_match_param *mask,
1269 bool inner, bool rx)
1271 dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
1275 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
1276 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1277 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
1281 dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
1282 bool inner, u8 *bit_mask)
1284 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1285 struct mlx5dr_match_misc *misc_mask = &value->misc;
1287 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
1288 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
1289 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
1290 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
1291 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
1292 DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
1294 if (mask->svlan_tag || mask->cvlan_tag) {
1295 MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
1296 mask->cvlan_tag = 0;
1297 mask->svlan_tag = 0;
1301 if (misc_mask->inner_second_cvlan_tag ||
1302 misc_mask->inner_second_svlan_tag) {
1303 MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
1304 misc_mask->inner_second_cvlan_tag = 0;
1305 misc_mask->inner_second_svlan_tag = 0;
1308 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1309 second_vlan_id, misc_mask, inner_second_vid);
1310 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1311 second_cfi, misc_mask, inner_second_cfi);
1312 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1313 second_priority, misc_mask, inner_second_prio);
1315 if (misc_mask->outer_second_cvlan_tag ||
1316 misc_mask->outer_second_svlan_tag) {
1317 MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
1318 misc_mask->outer_second_cvlan_tag = 0;
1319 misc_mask->outer_second_svlan_tag = 0;
1322 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1323 second_vlan_id, misc_mask, outer_second_vid);
1324 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1325 second_cfi, misc_mask, outer_second_cfi);
1326 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1327 second_priority, misc_mask, outer_second_prio);
1331 static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
1332 bool inner, u8 *hw_ste_p)
1334 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1335 struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
1336 struct mlx5dr_match_misc *misc_spec = &value->misc;
1337 u8 *tag = hw_ste->tag;
1339 DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
1340 DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
1341 DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
1342 DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
1343 DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
1345 if (spec->ip_version) {
1346 if (spec->ip_version == IP_VERSION_IPV4) {
1347 MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
1348 spec->ip_version = 0;
1349 } else if (spec->ip_version == IP_VERSION_IPV6) {
1350 MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
1351 spec->ip_version = 0;
1353 pr_info("Unsupported ip_version value\n");
1358 if (spec->cvlan_tag) {
1359 MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
1360 spec->cvlan_tag = 0;
1361 } else if (spec->svlan_tag) {
1362 MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
1363 spec->svlan_tag = 0;
1367 if (misc_spec->inner_second_cvlan_tag) {
1368 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
1369 misc_spec->inner_second_cvlan_tag = 0;
1370 } else if (misc_spec->inner_second_svlan_tag) {
1371 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
1372 misc_spec->inner_second_svlan_tag = 0;
1375 DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
1376 DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
1377 DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
1379 if (misc_spec->outer_second_cvlan_tag) {
1380 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
1381 misc_spec->outer_second_cvlan_tag = 0;
1382 } else if (misc_spec->outer_second_svlan_tag) {
1383 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
1384 misc_spec->outer_second_svlan_tag = 0;
1386 DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
1387 DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
1388 DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
1394 static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
1395 bool inner, u8 *bit_mask)
1397 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1399 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
1400 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
1402 dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1405 static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
1406 struct mlx5dr_ste_build *sb,
1409 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1410 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1411 u8 *tag = hw_ste->tag;
1413 DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
1414 DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
1416 return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
1419 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
1420 struct mlx5dr_match_param *mask,
1421 bool inner, bool rx)
1423 dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
1426 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
1427 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1428 sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
1431 static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
1432 bool inner, u8 *bit_mask)
1434 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1436 DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
1437 DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
1439 dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1442 static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
1443 struct mlx5dr_ste_build *sb,
1446 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1447 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1448 u8 *tag = hw_ste->tag;
1450 DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
1451 DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
1453 return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
1456 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
1457 struct mlx5dr_match_param *mask,
1458 bool inner, bool rx)
1460 dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
1464 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
1465 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1466 sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
1469 static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
1470 bool inner, u8 *bit_mask)
1472 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1473 struct mlx5dr_match_misc *misc = &value->misc;
1475 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
1476 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
1477 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
1478 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
1479 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
1480 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
1481 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
1482 DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
1484 if (misc->vxlan_vni) {
1485 MLX5_SET(ste_eth_l2_tnl, bit_mask,
1486 l2_tunneling_network_id, (misc->vxlan_vni << 8));
1487 misc->vxlan_vni = 0;
1490 if (mask->svlan_tag || mask->cvlan_tag) {
1491 MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
1492 mask->cvlan_tag = 0;
1493 mask->svlan_tag = 0;
1497 static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
1498 struct mlx5dr_ste_build *sb,
1501 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1502 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1503 struct mlx5dr_match_misc *misc = &value->misc;
1504 u8 *tag = hw_ste->tag;
1506 DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
1507 DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
1508 DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
1509 DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
1510 DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
1511 DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
1512 DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
1514 if (misc->vxlan_vni) {
1515 MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
1516 (misc->vxlan_vni << 8));
1517 misc->vxlan_vni = 0;
1520 if (spec->cvlan_tag) {
1521 MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
1522 spec->cvlan_tag = 0;
1523 } else if (spec->svlan_tag) {
1524 MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
1525 spec->svlan_tag = 0;
1528 if (spec->ip_version) {
1529 if (spec->ip_version == IP_VERSION_IPV4) {
1530 MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
1531 spec->ip_version = 0;
1532 } else if (spec->ip_version == IP_VERSION_IPV6) {
1533 MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
1534 spec->ip_version = 0;
1543 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
1544 struct mlx5dr_match_param *mask, bool inner, bool rx)
1546 dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
1550 sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
1551 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1552 sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
1555 static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
1556 bool inner, u8 *bit_mask)
1558 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1560 DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
1563 static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
1564 struct mlx5dr_ste_build *sb,
1567 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1568 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1569 u8 *tag = hw_ste->tag;
1571 DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
1576 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
1577 struct mlx5dr_match_param *mask,
1578 bool inner, bool rx)
1580 dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
1584 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
1585 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1586 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
1589 static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
1590 bool inner, u8 *bit_mask)
1592 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1594 DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
1595 DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
1596 DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
1597 DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
1598 DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
1599 DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
1600 DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
1601 DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
1602 DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
1604 if (mask->tcp_flags) {
1605 DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
1606 mask->tcp_flags = 0;
1610 static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
1611 struct mlx5dr_ste_build *sb,
1614 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1615 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1616 u8 *tag = hw_ste->tag;
1618 DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
1619 DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
1620 DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
1621 DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
1622 DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
1623 DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
1624 DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
1625 DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
1626 DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
1628 if (spec->tcp_flags) {
1629 DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
1630 spec->tcp_flags = 0;
1636 void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
1637 struct mlx5dr_match_param *mask,
1638 bool inner, bool rx)
1640 dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
1644 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
1645 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1646 sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
1649 static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
1650 struct mlx5dr_ste_build *sb,
1656 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1659 sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1661 sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1664 static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
1665 bool inner, u8 *bit_mask)
1667 struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
1670 DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
1672 DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
1675 static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
1676 struct mlx5dr_ste_build *sb,
1679 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1680 struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
1681 u8 *tag = hw_ste->tag;
1684 DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
1686 DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
1691 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
1692 struct mlx5dr_match_param *mask,
1693 bool inner, bool rx)
1695 dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
1699 sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
1700 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1701 sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
1704 static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
1705 bool inner, u8 *bit_mask)
1707 struct mlx5dr_match_misc *misc_mask = &value->misc;
1709 DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
1710 DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
1711 DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
1712 DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
1714 DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
1715 DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
1718 static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
1719 struct mlx5dr_ste_build *sb,
1722 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1723 struct mlx5dr_match_misc *misc = &value->misc;
1724 u8 *tag = hw_ste->tag;
1726 DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
1728 DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
1729 DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
1730 DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
1732 DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
1734 DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
1739 void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
1740 struct mlx5dr_match_param *mask, bool inner, bool rx)
1742 dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
1746 sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
1747 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1748 sb->ste_build_tag_func = &dr_ste_build_gre_tag;
1751 static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
1752 bool inner, u8 *bit_mask)
1754 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
1756 if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
1757 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
1758 misc_2_mask, outer_first_mpls_over_gre_label);
1760 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
1761 misc_2_mask, outer_first_mpls_over_gre_exp);
1763 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
1764 misc_2_mask, outer_first_mpls_over_gre_s_bos);
1766 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
1767 misc_2_mask, outer_first_mpls_over_gre_ttl);
1769 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
1770 misc_2_mask, outer_first_mpls_over_udp_label);
1772 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
1773 misc_2_mask, outer_first_mpls_over_udp_exp);
1775 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
1776 misc_2_mask, outer_first_mpls_over_udp_s_bos);
1778 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
1779 misc_2_mask, outer_first_mpls_over_udp_ttl);
1783 static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
1784 struct mlx5dr_ste_build *sb,
1787 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1788 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
1789 u8 *tag = hw_ste->tag;
1791 if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
1792 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
1793 misc_2_mask, outer_first_mpls_over_gre_label);
1795 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
1796 misc_2_mask, outer_first_mpls_over_gre_exp);
1798 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
1799 misc_2_mask, outer_first_mpls_over_gre_s_bos);
1801 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
1802 misc_2_mask, outer_first_mpls_over_gre_ttl);
1804 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
1805 misc_2_mask, outer_first_mpls_over_udp_label);
1807 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
1808 misc_2_mask, outer_first_mpls_over_udp_exp);
1810 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
1811 misc_2_mask, outer_first_mpls_over_udp_s_bos);
1813 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
1814 misc_2_mask, outer_first_mpls_over_udp_ttl);
1819 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
1820 struct mlx5dr_match_param *mask,
1821 bool inner, bool rx)
1823 dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
1827 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
1828 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1829 sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
1832 #define ICMP_TYPE_OFFSET_FIRST_DW 24
1833 #define ICMP_CODE_OFFSET_FIRST_DW 16
1834 #define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
1836 static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
1837 struct mlx5dr_cmd_caps *caps,
1840 struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
1841 bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
1842 u32 icmp_header_data_mask;
1849 icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
1850 icmp_type_mask = misc_3_mask->icmpv4_type;
1851 icmp_code_mask = misc_3_mask->icmpv4_code;
1852 dw0_location = caps->flex_parser_id_icmp_dw0;
1853 dw1_location = caps->flex_parser_id_icmp_dw1;
1855 icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
1856 icmp_type_mask = misc_3_mask->icmpv6_type;
1857 icmp_code_mask = misc_3_mask->icmpv6_code;
1858 dw0_location = caps->flex_parser_id_icmpv6_dw0;
1859 dw1_location = caps->flex_parser_id_icmpv6_dw1;
1862 switch (dw0_location) {
1864 if (icmp_type_mask) {
1865 MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
1866 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
1868 misc_3_mask->icmpv4_type = 0;
1870 misc_3_mask->icmpv6_type = 0;
1872 if (icmp_code_mask) {
1873 u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
1875 MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
1876 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
1878 misc_3_mask->icmpv4_code = 0;
1880 misc_3_mask->icmpv6_code = 0;
1887 switch (dw1_location) {
1889 if (icmp_header_data_mask) {
1890 MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
1891 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
1893 misc_3_mask->icmpv4_header_data = 0;
1895 misc_3_mask->icmpv6_header_data = 0;
1905 static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
1906 struct mlx5dr_ste_build *sb,
1909 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1910 struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
1911 u8 *tag = hw_ste->tag;
1912 u32 icmp_header_data;
1919 is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
1921 icmp_header_data = misc_3->icmpv4_header_data;
1922 icmp_type = misc_3->icmpv4_type;
1923 icmp_code = misc_3->icmpv4_code;
1924 dw0_location = sb->caps->flex_parser_id_icmp_dw0;
1925 dw1_location = sb->caps->flex_parser_id_icmp_dw1;
1927 icmp_header_data = misc_3->icmpv6_header_data;
1928 icmp_type = misc_3->icmpv6_type;
1929 icmp_code = misc_3->icmpv6_code;
1930 dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
1931 dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
1934 switch (dw0_location) {
1937 MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
1938 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
1940 misc_3->icmpv4_type = 0;
1942 misc_3->icmpv6_type = 0;
1946 u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
1948 MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
1949 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
1951 misc_3->icmpv4_code = 0;
1953 misc_3->icmpv6_code = 0;
1960 switch (dw1_location) {
1962 if (icmp_header_data) {
1963 MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
1964 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
1966 misc_3->icmpv4_header_data = 0;
1968 misc_3->icmpv6_header_data = 0;
1978 int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
1979 struct mlx5dr_match_param *mask,
1980 struct mlx5dr_cmd_caps *caps,
1981 bool inner, bool rx)
1985 ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
1992 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
1993 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1994 sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
1999 static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
2000 bool inner, u8 *bit_mask)
2002 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2004 DR_STE_SET_MASK_V(general_purpose, bit_mask,
2005 general_purpose_lookup_field, misc_2_mask,
2009 static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
2010 struct mlx5dr_ste_build *sb,
2013 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2014 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2015 u8 *tag = hw_ste->tag;
2017 DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
2018 misc_2_mask, metadata_reg_a);
2023 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
2024 struct mlx5dr_match_param *mask,
2025 bool inner, bool rx)
2027 dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
2031 sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
2032 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2033 sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
2036 static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
2037 bool inner, u8 *bit_mask)
2039 struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2042 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
2044 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
2047 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
2049 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
2054 static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
2055 struct mlx5dr_ste_build *sb,
2058 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2059 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2060 u8 *tag = hw_ste->tag;
2063 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
2064 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
2066 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
2067 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
2073 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
2074 struct mlx5dr_match_param *mask,
2075 bool inner, bool rx)
2077 dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
2081 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
2082 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2083 sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
2087 dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
2088 bool inner, u8 *bit_mask)
2090 struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2092 DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
2093 outer_vxlan_gpe_flags,
2094 misc_3_mask, outer_vxlan_gpe_flags);
2095 DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
2096 outer_vxlan_gpe_next_protocol,
2097 misc_3_mask, outer_vxlan_gpe_next_protocol);
2098 DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
2099 outer_vxlan_gpe_vni,
2100 misc_3_mask, outer_vxlan_gpe_vni);
2104 dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
2105 struct mlx5dr_ste_build *sb,
2108 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2109 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2110 u8 *tag = hw_ste->tag;
2112 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
2113 outer_vxlan_gpe_flags, misc3,
2114 outer_vxlan_gpe_flags);
2115 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
2116 outer_vxlan_gpe_next_protocol, misc3,
2117 outer_vxlan_gpe_next_protocol);
2118 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
2119 outer_vxlan_gpe_vni, misc3,
2120 outer_vxlan_gpe_vni);
2125 void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
2126 struct mlx5dr_match_param *mask,
2127 bool inner, bool rx)
2129 dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
2134 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2135 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2136 sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
2140 dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
2143 struct mlx5dr_match_misc *misc_mask = &value->misc;
2145 DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2146 geneve_protocol_type,
2147 misc_mask, geneve_protocol_type);
2148 DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2150 misc_mask, geneve_oam);
2151 DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2153 misc_mask, geneve_opt_len);
2154 DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2156 misc_mask, geneve_vni);
2160 dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
2161 struct mlx5dr_ste_build *sb,
2164 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2165 struct mlx5dr_match_misc *misc = &value->misc;
2166 u8 *tag = hw_ste->tag;
2168 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2169 geneve_protocol_type, misc, geneve_protocol_type);
2170 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2171 geneve_oam, misc, geneve_oam);
2172 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2173 geneve_opt_len, misc, geneve_opt_len);
2174 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2175 geneve_vni, misc, geneve_vni);
2180 void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
2181 struct mlx5dr_match_param *mask,
2182 bool inner, bool rx)
2184 dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
2187 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2188 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2189 sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
2192 static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
2195 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2197 DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
2198 misc_2_mask, metadata_reg_c_0);
2199 DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
2200 misc_2_mask, metadata_reg_c_1);
2201 DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
2202 misc_2_mask, metadata_reg_c_2);
2203 DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
2204 misc_2_mask, metadata_reg_c_3);
2207 static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
2208 struct mlx5dr_ste_build *sb,
2211 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2212 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
2213 u8 *tag = hw_ste->tag;
2215 DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
2216 DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
2217 DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
2218 DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
2223 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
2224 struct mlx5dr_match_param *mask,
2225 bool inner, bool rx)
2227 dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
2231 sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
2232 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2233 sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
2236 static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
2239 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2241 DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
2242 misc_2_mask, metadata_reg_c_4);
2243 DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
2244 misc_2_mask, metadata_reg_c_5);
2245 DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
2246 misc_2_mask, metadata_reg_c_6);
2247 DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
2248 misc_2_mask, metadata_reg_c_7);
2251 static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
2252 struct mlx5dr_ste_build *sb,
2255 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2256 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
2257 u8 *tag = hw_ste->tag;
2259 DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
2260 DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
2261 DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
2262 DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
2267 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
2268 struct mlx5dr_match_param *mask,
2269 bool inner, bool rx)
2271 dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
2275 sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
2276 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2277 sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
2280 static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
2283 struct mlx5dr_match_misc *misc_mask = &value->misc;
2285 /* Partial misc source_port is not supported */
2286 if (misc_mask->source_port && misc_mask->source_port != 0xffff)
2289 /* Partial misc source_eswitch_owner_vhca_id is not supported */
2290 if (misc_mask->source_eswitch_owner_vhca_id &&
2291 misc_mask->source_eswitch_owner_vhca_id != 0xffff)
2294 DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
2295 DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
2296 misc_mask->source_eswitch_owner_vhca_id = 0;
2301 static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
2302 struct mlx5dr_ste_build *sb,
2305 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2306 struct mlx5dr_match_misc *misc = &value->misc;
2307 struct mlx5dr_cmd_vport_cap *vport_cap;
2308 struct mlx5dr_domain *dmn = sb->dmn;
2309 struct mlx5dr_cmd_caps *caps;
2310 u8 *tag = hw_ste->tag;
2312 DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
2314 if (sb->vhca_id_valid) {
2315 /* Find port GVMI based on the eswitch_owner_vhca_id */
2316 if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
2317 caps = &dmn->info.caps;
2318 else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
2319 dmn->peer_dmn->info.caps.gvmi))
2320 caps = &dmn->peer_dmn->info.caps;
2324 caps = &dmn->info.caps;
2327 vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
2331 if (vport_cap->vport_gvmi)
2332 MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
2334 misc->source_eswitch_owner_vhca_id = 0;
2335 misc->source_port = 0;
2340 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
2341 struct mlx5dr_match_param *mask,
2342 struct mlx5dr_domain *dmn,
2343 bool inner, bool rx)
2347 /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
2348 sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
2350 ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
2357 sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
2358 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2359 sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;