1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
4 #include <linux/types.h>
7 #define DR_STE_CRC_POLY 0xEDB88320L
13 #define IP_VERSION_IPV4 0x4
14 #define IP_VERSION_IPV6 0x6
18 #define DR_STE_ENABLE_FLOW_TAG BIT(31)
/* Set to STE a specific value using DR_STE_SET.
 * Writes @value into ste_<lookup_type> field @t_fname only when the matching
 * spec field @s_fname is non-zero, then zeroes the spec field to mark it as
 * consumed by this builder.
 */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
	if ((spec)->s_fname) { \
		MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
		(spec)->s_fname = 0; \
/* Set to STE spec->s_fname to tag->t_fname
 * (copies the spec value itself into the tag field).
 */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used
 * (all-ones mask field, regardless of the spec value).
 */
#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used
 * (value-for-value mask copy, as opposed to DR_STE_SET_MASK's all-ones).
 */
#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
/* Scatter the nine TCP flag bits from spec->tcp_flags (NS..FIN, header
 * bit order) into the individual single-bit STE fields.
 */
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
	MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
/* Copy the first-MPLS mask fields (label/s_bos/exp/ttl) for direction
 * @in_out (inner/outer) into the STE bit mask.
 */
#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
			  in_out##_first_mpls_label);\
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
			  in_out##_first_mpls_s_bos); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
			  in_out##_first_mpls_exp); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
			  in_out##_first_mpls_ttl); \
/* Copy the first-MPLS value fields (label/s_bos/exp/ttl) for direction
 * @in_out (inner/outer) into the STE tag.
 */
#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
		       in_out##_first_mpls_label);\
	DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
		       in_out##_first_mpls_s_bos); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
		       in_out##_first_mpls_exp); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
		       in_out##_first_mpls_ttl); \
/* True when any outer-MPLS-over-GRE mask field is set */
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)
/* True when any outer-MPLS-over-UDP mask field is set */
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)
/* Select the lookup-type variant: _I when matching inner headers,
 * otherwise _D on the RX path and _O on the TX path.
 */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
		   (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
			  MLX5DR_STE_LU_TYPE_##lookup_type##_O)
/* Values programmed into the STE tunneling_action field
 * (see mlx5dr_ste_set_rx_decap() and friends below).
 */
enum dr_ste_tunl_action {
	DR_STE_TUNL_ACTION_NONE		= 0,
	DR_STE_TUNL_ACTION_ENABLE	= 1,
	DR_STE_TUNL_ACTION_DECAP	= 2,	/* L2 tunnel decap */
	DR_STE_TUNL_ACTION_L3_DECAP	= 3,	/* L3 tunnel decap */
	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
/* Values programmed into the sx_transmit STE action_type field */
enum dr_ste_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
	DR_STE_ACTION_TYPE_ENCAP	= 4,
/* In-memory layout of a hardware STE: control section, match tag, and the
 * per-byte match bit mask (the mask part is not sent in the reduced format).
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
/* Compute the hash-table index for an STE: CRC32 over the byte-masked tag,
 * reduced by the (power-of-two) table size.
 */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 masked[DR_STE_SIZE_TAG] = {};

	/* Don't calculate CRC if the result is predicted */
	if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)

	/* Mask tag using byte mask, bit per byte (MSB maps to tag[0]) */
	bit = 1 << (DR_STE_SIZE_TAG - 1);
	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
		if (htbl->byte_mask & bit)
			masked[i] = hw_ste->tag[i];

	crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
	/* num_of_entries is a power of two, so AND == modulo */
	index = crc32 & (htbl->chunk->num_of_entries - 1);
/* Condense the per-bit STE mask into a per-byte mask: one output bit per
 * mask byte, set when the whole byte (0xff) participates in the match.
 */
static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
		byte_mask = byte_mask << 1;
		if (bit_mask[i] == 0xff)
150 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
152 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
154 memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
157 void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
159 MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
160 DR_STE_ENABLE_FLOW_TAG | flow_tag);
163 void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
165 /* This can be used for both rx_steering_mult and for sx_transmit */
166 MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
167 MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
170 void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
172 MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
/* Program a TX STE to push the VLAN header described by @vlan_hdr */
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	mlx5dr_ste_set_go_back_bit(hw_ste_p);
188 void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
190 MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
191 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
192 /* The hardware expects here size in words (2 byte) */
193 MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
194 MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
197 void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
199 MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
200 DR_STE_TUNL_ACTION_DECAP);
203 void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
205 MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
206 DR_STE_TUNL_ACTION_POP_VLAN);
209 void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
211 MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
212 DR_STE_TUNL_ACTION_L3_DECAP);
213 MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
216 void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
218 MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
221 u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
223 return MLX5_GET(ste_general, hw_ste_p, entry_type);
/* Program a modify-header STE: number of rewrite actions and the pointer
 * to the pre-written action list.
 */
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
235 void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
237 MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
/* Initialize a fresh STE: entry type, lookup type (don't-care next hop)
 * and the GVMI replicated into the gvmi/next-table/miss-address fields.
 */
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set GVMI once, this is the same for RX/TX
	 * bits 63_48 of next table base / miss address encode the next GVMI
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
255 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
257 memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
258 memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
/* Mark the STE as always-miss by writing a sentinel value into the first
 * tag byte.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
	hw_ste->tag[0] = 0xdc;
267 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
270 (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
271 MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
276 void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
278 u64 index = (icm_addr >> 5) | ht_size;
280 MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
281 MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
284 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
286 u32 index = ste - ste->htbl->ste_arr;
288 return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
291 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
293 u32 index = ste - ste->htbl->ste_arr;
295 return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
298 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
300 u32 index = ste - ste->htbl->ste_arr;
302 return &ste->htbl->miss_list[index];
305 static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
306 struct mlx5dr_ste_htbl *next_htbl)
308 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
309 u8 *hw_ste = ste->hw_ste;
311 MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
312 MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
313 mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
315 dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
/* True when the given chain location is the rule's final builder */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
	return ste_location == nic_matcher->num_of_builders;
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * | a | -> | b | -> | c | ->
 *
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
		/* NOTE(review): this dereference appears to be guarded by a
		 * next_htbl NULL check elided from this excerpt — confirm.
		 */
		dst->next_htbl->pointing_ste = dst;

	/* dst inherits src's reference count */
	refcount_set(&dst->refcount, refcount_read(&src->refcount));

	/* Move all rule members from src to dst */
	INIT_LIST_HEAD(&dst->rule_list);
	list_splice_tail_init(&src->rule_list, &dst->rule_list);
/* Free ste which is the head and the only one in miss_list:
 * rewrite it in place as an always-miss entry pointing at the matcher's
 * end anchor, and push the full-size STE to HW.
 */
dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 * next_ste's contents are copied over ste so the chain keeps working,
 * then next_ste's hosting table is released.
 */
dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)
	struct mlx5dr_ste_htbl *next_miss_htbl;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* All rule-members that use next_ste should know about that */
	mlx5dr_rule_update_rule_member(next_ste, ste);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	/* Push the rewritten head to HW (reduced size, no mask change) */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 * Bypass it by pointing prev_ste's miss address at ste's miss address.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
	struct mlx5dr_ste *prev_ste;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))

	/* Relink: prev now misses to whatever ste missed to */
	miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
	mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);

	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
/* Release an STE: unlink it from its miss list (handling head-only,
 * head-with-followers and middle positions differently), flush the queued
 * STE updates to HW, and drop the hosting table reference when the STE
 * stays in its origin table.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two cases when the freed STE is the head of its miss list:
	 * a. head ste is the only ste in the miss list
	 * b. head ste is not the only ste in the miss-list
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
		next_ste = list_next_entry(ste, miss_list_node);
		/* One and only entry in the list */
		dr_ste_remove_head_ste(ste, nic_matcher,
		/* First but not only entry in the list */
		dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
					&send_ste_list, stats_tbl);
		put_on_origin_table = false;
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);

	/* Flush every queued STE update to the device */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
510 bool mlx5dr_ste_equal_tag(void *src, void *dst)
512 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
513 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
515 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
518 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
519 struct mlx5dr_ste_htbl *next_htbl)
521 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
523 mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
526 void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
528 u64 index = miss_addr >> 6;
530 /* Miss address for TX and RX STEs located in the same offsets */
531 MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
532 MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
535 void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
537 u8 *hw_ste = ste->hw_ste;
539 MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
540 mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
541 dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
/* The assumption here is that we don't update the ste->hw_ste if it is not
 * used ste, so it will be all zero, checking the next_lu_type.
 */
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;

	/* A never-written STE is all-zero, so next_lu_type reads as NOP */
	if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
	    MLX5DR_STE_LU_TYPE_NOP)
558 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
560 return !refcount_read(&ste->refcount);
/* Clears the lowest set bit of @byte_mask each iteration (Kernighan's
 * trick).  NOTE(review): the counting/loop lines are elided from this
 * excerpt — presumably returns the number of set bits; confirm.
 */
static u16 get_bits_per_mask(u16 byte_mask)
		byte_mask = byte_mask & (byte_mask - 1);
/* Init one ste as a pattern for ste data array: base-initialize it and
 * connect it either as always-hit to the next table or as always-miss to
 * the given ICM address.
 */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  struct mlx5dr_htbl_connect_info *connect_info)
	struct mlx5dr_ste ste = {};

	mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
	ste.hw_ste = formatted_ste;

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
		/* non-hit connect type: always-miss to the given address */
		mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
/* Build the table's formatted-STE pattern and write the whole hash table
 * to the device.  Returns the postsend result.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
/* Allocate and connect the next hash table in the rule chain for @ste,
 * sized no larger than the next builder's byte mask requires, and link
 * the current STE's hit address to it.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				enum mlx5dr_icm_chunk_size log_table_size)
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);

		/* Don't allocate table more than required,
		 * the size of the table defined via the byte_mask, so no need
		 * to allocate more than that.
		 */
		bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
		log_table_size = min(log_table_size, bits_in_mask);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
			mlx5dr_dbg(dmn, "Failed allocating table\n");

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
			mlx5dr_info(dmn, "Failed writing table to HW\n");

		/* Link the current STE to the newly created table */
		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;

	/* error path: release the table that failed to be written to HW */
	mlx5dr_ste_htbl_free(next_htbl);
/* Initialize the table's growth-control parameters: tables may grow until
 * the maximum chunk size, and rehash is triggered at 50% occupancy.
 */
static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;

	htbl->ctrl.may_grow = true;

	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
		htbl->ctrl.may_grow = false;

	/* Threshold is 50%, one is added to table of size 1 */
	num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
	ctrl->increase_threshold = (num_of_entries + 1) / 2;
/* Allocate an STE hash table backed by an ICM chunk of @chunk_size and
 * initialize every entry (hw_ste pointer, refcount, list heads).
 * NOTE(review): the allocation-failure branches are elided from this
 * excerpt — confirm kzalloc/alloc_chunk results are checked.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u8 lu_type, u16 byte_mask)
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);

	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	refcount_set(&htbl->refcount, 0);

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* reduced-format STEs are packed back to back in the chunk */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		refcount_set(&ste->refcount, 0);
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
		INIT_LIST_HEAD(&ste->rule_list);

	htbl->chunk_size = chunk_size;
	dr_ste_set_ctrl(htbl);
/* Free an STE hash table and its ICM chunk; refuses while references
 * remain.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
	if (refcount_read(&htbl->refcount))

	mlx5dr_icm_free_chunk(htbl->chunk);
/* Validate the match mask before building STEs: when only a mask is given
 * (no value), a partial source_port mask is rejected.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
	if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
/* Build the array of STEs for a rule: for every registered builder,
 * initialize the STE, install its bit mask, fill its tag, and chain each
 * STE to the next one's lookup parameters.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_build *sb;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		mlx5dr_ste_init(ste_arr,
				dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		/* builder-specific callback fills the match tag */
		ret = sb->ste_build_tag_func(value, sb, ste_arr);

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
			MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
		ste_arr += DR_STE_SIZE;
/* Build the eth_l2_src_dst bit mask from the inner or outer match spec:
 * DMAC, SMAC (repacked 47_32/31_0), first VLAN fields and L3 type.
 * Exactly one of cvlan/svlan may be masked.
 */
static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* SMAC uses a different split in the STE (47_32/31_0) than the spec
	 * (47_16/15_0), so repack manually and consume the spec fields.
	 */
	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);

	/* both set at once is an unsupported combination */
	if (mask->cvlan_tag || mask->svlan_tag) {
		pr_info("Invalid c/svlan mask configuration\n");
/* Unpack the FW-format fte_match_set_misc mask buffer into the SW-steering
 * mlx5dr_match_misc structure, field by field.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
/* Unpack the FW-format fte_match_set_lyr_2_4 mask buffer into the
 * SW-steering mlx5dr_match_spec structure; the IPv6/IPv4 addresses are
 * copied out big-endian and converted to host-order 32-bit words.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	/* source address: four big-endian words, high to low */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	/* destination address: same layout as the source */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
/* Unpack the FW-format fte_match_set_misc2 mask buffer into the
 * SW-steering mlx5dr_match_misc2 structure: MPLS headers (first,
 * over-GRE, over-UDP) and metadata registers.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
	spec->outer_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
	spec->outer_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
	spec->outer_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
	spec->outer_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
	spec->inner_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
	spec->inner_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
	spec->inner_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
	spec->inner_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
	spec->outer_first_mpls_over_gre_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
	spec->outer_first_mpls_over_gre_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
	spec->outer_first_mpls_over_gre_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
	spec->outer_first_mpls_over_gre_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
	spec->outer_first_mpls_over_udp_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
	spec->outer_first_mpls_over_udp_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
	spec->outer_first_mpls_over_udp_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
	spec->outer_first_mpls_over_udp_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
	spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
/* Unpack the FW-format fte_match_set_misc3 mask buffer into the
 * SW-steering mlx5dr_match_misc3 structure: TCP seq/ack, VXLAN-GPE and
 * ICMPv4/v6 fields.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
	spec->outer_vxlan_gpe_vni =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
	spec->outer_vxlan_gpe_next_protocol =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
	spec->outer_vxlan_gpe_flags =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
	spec->icmpv6_header_data =
		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
/* Copy the user-supplied match mask buffer into @set_param, section by
 * section in the FW layout order (outer, misc, inner, misc2, misc3),
 * honoring @match_criteria.  When the caller's buffer is shorter than a
 * section, the remainder is staged through a zero-padded tail buffer.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask)
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data, mask->match_sz);
		buff = mask->match_buf;
		dr_ste_copy_mask_spec(buff, &set_param->outer);
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
		buff = data + param_location;
		dr_ste_copy_mask_misc(buff, &set_param->misc);
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
		buff = data + param_location;
		dr_ste_copy_mask_spec(buff, &set_param->inner);
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc2)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
		buff = data + param_location;
		dr_ste_copy_mask_misc2(buff, &set_param->misc2);

	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc3)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
		buff = data + param_location;
		dr_ste_copy_mask_misc3(buff, &set_param->misc3);
1073 static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
1074 struct mlx5dr_ste_build *sb,
1077 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1078 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1079 u8 *tag = hw_ste->tag;
1081 DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
1082 DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
1084 if (spec->smac_47_16 || spec->smac_15_0) {
1085 MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
1086 spec->smac_47_16 >> 16);
1087 MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
1088 spec->smac_47_16 << 16 | spec->smac_15_0);
1089 spec->smac_47_16 = 0;
1090 spec->smac_15_0 = 0;
1093 if (spec->ip_version) {
1094 if (spec->ip_version == IP_VERSION_IPV4) {
1095 MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
1096 spec->ip_version = 0;
1097 } else if (spec->ip_version == IP_VERSION_IPV6) {
1098 MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
1099 spec->ip_version = 0;
1101 pr_info("Unsupported ip_version value\n");
1106 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
1107 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
1108 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
1110 if (spec->cvlan_tag) {
1111 MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
1112 spec->cvlan_tag = 0;
1113 } else if (spec->svlan_tag) {
1114 MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
1115 spec->svlan_tag = 0;
1120 int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
1121 struct mlx5dr_match_param *mask,
1122 bool inner, bool rx)
1126 ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
1132 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
1133 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1134 sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
1139 static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
1140 bool inner, u8 *bit_mask)
1142 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1144 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
1145 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
1146 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
1147 DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
1150 static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
1151 struct mlx5dr_ste_build *sb,
1154 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1155 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1156 u8 *tag = hw_ste->tag;
1158 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
1159 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
1160 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
1161 DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
1166 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
1167 struct mlx5dr_match_param *mask,
1168 bool inner, bool rx)
1170 dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
1174 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
1175 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1176 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
1179 static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
1180 bool inner, u8 *bit_mask)
1182 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1184 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
1185 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
1186 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
1187 DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
1190 static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
1191 struct mlx5dr_ste_build *sb,
1194 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1195 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1196 u8 *tag = hw_ste->tag;
1198 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
1199 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
1200 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
1201 DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
1206 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
1207 struct mlx5dr_match_param *mask,
1208 bool inner, bool rx)
1210 dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
1214 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
1215 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1216 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
1219 static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
1223 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1225 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1226 destination_address, mask, dst_ip_31_0);
1227 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1228 source_address, mask, src_ip_31_0);
1229 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1230 destination_port, mask, tcp_dport);
1231 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1232 destination_port, mask, udp_dport);
1233 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1234 source_port, mask, tcp_sport);
1235 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1236 source_port, mask, udp_sport);
1237 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1238 protocol, mask, ip_protocol);
1239 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1240 fragmented, mask, frag);
1241 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1242 dscp, mask, ip_dscp);
1243 DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
1246 if (mask->tcp_flags) {
1247 DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
1248 mask->tcp_flags = 0;
1252 static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
1253 struct mlx5dr_ste_build *sb,
1256 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1257 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1258 u8 *tag = hw_ste->tag;
1260 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
1261 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
1262 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
1263 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
1264 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
1265 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
1266 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
1267 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
1268 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
1269 DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
1271 if (spec->tcp_flags) {
1272 DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
1273 spec->tcp_flags = 0;
1279 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
1280 struct mlx5dr_match_param *mask,
1281 bool inner, bool rx)
1283 dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
1287 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
1288 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1289 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
1293 dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
1294 bool inner, u8 *bit_mask)
1296 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1297 struct mlx5dr_match_misc *misc_mask = &value->misc;
1299 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
1300 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
1301 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
1302 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
1303 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
1304 DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
1306 if (mask->svlan_tag || mask->cvlan_tag) {
1307 MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
1308 mask->cvlan_tag = 0;
1309 mask->svlan_tag = 0;
1313 if (misc_mask->inner_second_cvlan_tag ||
1314 misc_mask->inner_second_svlan_tag) {
1315 MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
1316 misc_mask->inner_second_cvlan_tag = 0;
1317 misc_mask->inner_second_svlan_tag = 0;
1320 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1321 second_vlan_id, misc_mask, inner_second_vid);
1322 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1323 second_cfi, misc_mask, inner_second_cfi);
1324 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1325 second_priority, misc_mask, inner_second_prio);
1327 if (misc_mask->outer_second_cvlan_tag ||
1328 misc_mask->outer_second_svlan_tag) {
1329 MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
1330 misc_mask->outer_second_cvlan_tag = 0;
1331 misc_mask->outer_second_svlan_tag = 0;
1334 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1335 second_vlan_id, misc_mask, outer_second_vid);
1336 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1337 second_cfi, misc_mask, outer_second_cfi);
1338 DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
1339 second_priority, misc_mask, outer_second_prio);
1343 static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
1344 bool inner, u8 *hw_ste_p)
1346 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1347 struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
1348 struct mlx5dr_match_misc *misc_spec = &value->misc;
1349 u8 *tag = hw_ste->tag;
1351 DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
1352 DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
1353 DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
1354 DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
1355 DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
1357 if (spec->ip_version) {
1358 if (spec->ip_version == IP_VERSION_IPV4) {
1359 MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
1360 spec->ip_version = 0;
1361 } else if (spec->ip_version == IP_VERSION_IPV6) {
1362 MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
1363 spec->ip_version = 0;
1365 pr_info("Unsupported ip_version value\n");
1370 if (spec->cvlan_tag) {
1371 MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
1372 spec->cvlan_tag = 0;
1373 } else if (spec->svlan_tag) {
1374 MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
1375 spec->svlan_tag = 0;
1379 if (misc_spec->inner_second_cvlan_tag) {
1380 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
1381 misc_spec->inner_second_cvlan_tag = 0;
1382 } else if (misc_spec->inner_second_svlan_tag) {
1383 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
1384 misc_spec->inner_second_svlan_tag = 0;
1387 DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
1388 DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
1389 DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
1391 if (misc_spec->outer_second_cvlan_tag) {
1392 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
1393 misc_spec->outer_second_cvlan_tag = 0;
1394 } else if (misc_spec->outer_second_svlan_tag) {
1395 MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
1396 misc_spec->outer_second_svlan_tag = 0;
1398 DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
1399 DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
1400 DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
1406 static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
1407 bool inner, u8 *bit_mask)
1409 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1411 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
1412 DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
1414 dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1417 static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
1418 struct mlx5dr_ste_build *sb,
1421 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1422 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1423 u8 *tag = hw_ste->tag;
1425 DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
1426 DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
1428 return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
1431 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
1432 struct mlx5dr_match_param *mask,
1433 bool inner, bool rx)
1435 dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
1438 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
1439 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1440 sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
1443 static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
1444 bool inner, u8 *bit_mask)
1446 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1448 DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
1449 DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
1451 dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1454 static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
1455 struct mlx5dr_ste_build *sb,
1458 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1459 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1460 u8 *tag = hw_ste->tag;
1462 DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
1463 DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
1465 return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
1468 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
1469 struct mlx5dr_match_param *mask,
1470 bool inner, bool rx)
1472 dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
1476 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
1477 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1478 sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
1481 static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
1482 bool inner, u8 *bit_mask)
1484 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1485 struct mlx5dr_match_misc *misc = &value->misc;
1487 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
1488 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
1489 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
1490 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
1491 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
1492 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
1493 DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
1494 DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
1496 if (misc->vxlan_vni) {
1497 MLX5_SET(ste_eth_l2_tnl, bit_mask,
1498 l2_tunneling_network_id, (misc->vxlan_vni << 8));
1499 misc->vxlan_vni = 0;
1502 if (mask->svlan_tag || mask->cvlan_tag) {
1503 MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
1504 mask->cvlan_tag = 0;
1505 mask->svlan_tag = 0;
1509 static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
1510 struct mlx5dr_ste_build *sb,
1513 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1514 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1515 struct mlx5dr_match_misc *misc = &value->misc;
1516 u8 *tag = hw_ste->tag;
1518 DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
1519 DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
1520 DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
1521 DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
1522 DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
1523 DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
1524 DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
1526 if (misc->vxlan_vni) {
1527 MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
1528 (misc->vxlan_vni << 8));
1529 misc->vxlan_vni = 0;
1532 if (spec->cvlan_tag) {
1533 MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
1534 spec->cvlan_tag = 0;
1535 } else if (spec->svlan_tag) {
1536 MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
1537 spec->svlan_tag = 0;
1540 if (spec->ip_version) {
1541 if (spec->ip_version == IP_VERSION_IPV4) {
1542 MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
1543 spec->ip_version = 0;
1544 } else if (spec->ip_version == IP_VERSION_IPV6) {
1545 MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
1546 spec->ip_version = 0;
1555 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
1556 struct mlx5dr_match_param *mask, bool inner, bool rx)
1558 dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
1562 sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
1563 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1564 sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
1567 static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
1568 bool inner, u8 *bit_mask)
1570 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1572 DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
1575 static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
1576 struct mlx5dr_ste_build *sb,
1579 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1580 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1581 u8 *tag = hw_ste->tag;
1583 DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
1588 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
1589 struct mlx5dr_match_param *mask,
1590 bool inner, bool rx)
1592 dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
1596 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
1597 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1598 sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
1601 static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
1602 bool inner, u8 *bit_mask)
1604 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1606 DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
1607 DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
1608 DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
1609 DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
1610 DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
1611 DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
1612 DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
1613 DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
1614 DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
1616 if (mask->tcp_flags) {
1617 DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
1618 mask->tcp_flags = 0;
1622 static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
1623 struct mlx5dr_ste_build *sb,
1626 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1627 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1628 u8 *tag = hw_ste->tag;
1630 DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
1631 DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
1632 DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
1633 DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
1634 DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
1635 DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
1636 DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
1637 DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
1638 DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
1640 if (spec->tcp_flags) {
1641 DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
1642 spec->tcp_flags = 0;
1648 void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
1649 struct mlx5dr_match_param *mask,
1650 bool inner, bool rx)
1652 dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
1656 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
1657 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1658 sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
1661 static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
1662 struct mlx5dr_ste_build *sb,
1668 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1671 sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1673 sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1676 static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
1677 bool inner, u8 *bit_mask)
1679 struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
1682 DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
1684 DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
1687 static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
1688 struct mlx5dr_ste_build *sb,
1691 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1692 struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
1693 u8 *tag = hw_ste->tag;
1696 DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
1698 DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
1703 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
1704 struct mlx5dr_match_param *mask,
1705 bool inner, bool rx)
1707 dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
1711 sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
1712 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1713 sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
1716 static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
1717 bool inner, u8 *bit_mask)
1719 struct mlx5dr_match_misc *misc_mask = &value->misc;
1721 DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
1722 DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
1723 DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
1724 DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
1726 DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
1727 DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
1730 static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
1731 struct mlx5dr_ste_build *sb,
1734 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1735 struct mlx5dr_match_misc *misc = &value->misc;
1736 u8 *tag = hw_ste->tag;
1738 DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
1740 DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
1741 DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
1742 DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
1744 DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
1746 DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
1751 void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
1752 struct mlx5dr_match_param *mask, bool inner, bool rx)
1754 dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
1758 sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
1759 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1760 sb->ste_build_tag_func = &dr_ste_build_gre_tag;
1763 static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
1764 bool inner, u8 *bit_mask)
1766 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
1768 if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
1769 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
1770 misc_2_mask, outer_first_mpls_over_gre_label);
1772 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
1773 misc_2_mask, outer_first_mpls_over_gre_exp);
1775 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
1776 misc_2_mask, outer_first_mpls_over_gre_s_bos);
1778 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
1779 misc_2_mask, outer_first_mpls_over_gre_ttl);
1781 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
1782 misc_2_mask, outer_first_mpls_over_udp_label);
1784 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
1785 misc_2_mask, outer_first_mpls_over_udp_exp);
1787 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
1788 misc_2_mask, outer_first_mpls_over_udp_s_bos);
1790 DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
1791 misc_2_mask, outer_first_mpls_over_udp_ttl);
1795 static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
1796 struct mlx5dr_ste_build *sb,
1799 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1800 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
1801 u8 *tag = hw_ste->tag;
1803 if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
1804 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
1805 misc_2_mask, outer_first_mpls_over_gre_label);
1807 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
1808 misc_2_mask, outer_first_mpls_over_gre_exp);
1810 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
1811 misc_2_mask, outer_first_mpls_over_gre_s_bos);
1813 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
1814 misc_2_mask, outer_first_mpls_over_gre_ttl);
1816 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
1817 misc_2_mask, outer_first_mpls_over_udp_label);
1819 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
1820 misc_2_mask, outer_first_mpls_over_udp_exp);
1822 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
1823 misc_2_mask, outer_first_mpls_over_udp_s_bos);
1825 DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
1826 misc_2_mask, outer_first_mpls_over_udp_ttl);
1831 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
1832 struct mlx5dr_match_param *mask,
1833 bool inner, bool rx)
1835 dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
1839 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
1840 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1841 sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
1844 #define ICMP_TYPE_OFFSET_FIRST_DW 24
1845 #define ICMP_CODE_OFFSET_FIRST_DW 16
1846 #define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
1848 static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
1849 struct mlx5dr_cmd_caps *caps,
1852 struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
1853 bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
1854 u32 icmp_header_data_mask;
1861 icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
1862 icmp_type_mask = misc_3_mask->icmpv4_type;
1863 icmp_code_mask = misc_3_mask->icmpv4_code;
1864 dw0_location = caps->flex_parser_id_icmp_dw0;
1865 dw1_location = caps->flex_parser_id_icmp_dw1;
1867 icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
1868 icmp_type_mask = misc_3_mask->icmpv6_type;
1869 icmp_code_mask = misc_3_mask->icmpv6_code;
1870 dw0_location = caps->flex_parser_id_icmpv6_dw0;
1871 dw1_location = caps->flex_parser_id_icmpv6_dw1;
1874 switch (dw0_location) {
1876 if (icmp_type_mask) {
1877 MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
1878 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
1880 misc_3_mask->icmpv4_type = 0;
1882 misc_3_mask->icmpv6_type = 0;
1884 if (icmp_code_mask) {
1885 u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
1887 MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
1888 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
1890 misc_3_mask->icmpv4_code = 0;
1892 misc_3_mask->icmpv6_code = 0;
1899 switch (dw1_location) {
1901 if (icmp_header_data_mask) {
1902 MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
1903 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
1905 misc_3_mask->icmpv4_header_data = 0;
1907 misc_3_mask->icmpv6_header_data = 0;
1917 static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
1918 struct mlx5dr_ste_build *sb,
1921 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1922 struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
1923 u8 *tag = hw_ste->tag;
1924 u32 icmp_header_data;
1931 is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
1933 icmp_header_data = misc_3->icmpv4_header_data;
1934 icmp_type = misc_3->icmpv4_type;
1935 icmp_code = misc_3->icmpv4_code;
1936 dw0_location = sb->caps->flex_parser_id_icmp_dw0;
1937 dw1_location = sb->caps->flex_parser_id_icmp_dw1;
1939 icmp_header_data = misc_3->icmpv6_header_data;
1940 icmp_type = misc_3->icmpv6_type;
1941 icmp_code = misc_3->icmpv6_code;
1942 dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
1943 dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
1946 switch (dw0_location) {
1949 MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
1950 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
1952 misc_3->icmpv4_type = 0;
1954 misc_3->icmpv6_type = 0;
1958 u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
1960 MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
1961 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
1963 misc_3->icmpv4_code = 0;
1965 misc_3->icmpv6_code = 0;
1972 switch (dw1_location) {
1974 if (icmp_header_data) {
1975 MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
1976 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
1978 misc_3->icmpv4_header_data = 0;
1980 misc_3->icmpv6_header_data = 0;
/* Initialize an STE builder for FLEX_PARSER_1 (ICMP) lookups.
 *
 * Builds the bit mask from @mask (can fail, e.g. on unsupported masks,
 * hence the int return), then fills in the lookup type, the derived
 * byte mask and the tag-build callback used at rule-insertion time.
 */
1990 int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
1991 struct mlx5dr_match_param *mask,
1992 struct mlx5dr_cmd_caps *caps,
1993 bool inner, bool rx)
1997 ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
2004 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
2005 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2006 sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
/* Build the GENERAL_PURPOSE bit mask from the misc2 match parameters.
 * Writes the general_purpose_lookup_field mask and marks the consumed
 * misc2 field as used (DR_STE_SET_MASK_V zeroes the spec field).
 * NOTE(review): the final field argument is not visible in this chunk;
 * the tag builder below uses metadata_reg_a — presumably the same here.
 */
2011 static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
2012 bool inner, u8 *bit_mask)
2014 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2016 DR_STE_SET_MASK_V(general_purpose, bit_mask,
2017 general_purpose_lookup_field, misc_2_mask,
/* Build the GENERAL_PURPOSE STE tag: copy misc2 metadata_reg_a into the
 * general_purpose_lookup_field and clear the consumed spec field.
 */
2021 static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
2022 struct mlx5dr_ste_build *sb,
2025 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2026 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2027 u8 *tag = hw_ste->tag;
2029 DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
2030 misc_2_mask, metadata_reg_a);
/* Initialize an STE builder for GENERAL_PURPOSE (metadata_reg_a) lookups:
 * build the bit mask, then set lookup type, byte mask and tag callback.
 */
2035 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
2036 struct mlx5dr_match_param *mask,
2037 bool inner, bool rx)
2039 dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
2043 sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
2044 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2045 sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
/* Build the ETHL4_MISC bit mask: TCP sequence and ack numbers from misc3.
 * NOTE(review): the misc3 field-name arguments are not visible here; by
 * analogy with the tag builder below, the first pair presumably uses the
 * inner_tcp_* fields and the second the outer_tcp_* fields, selected by
 * @inner — confirm against the full source.
 */
2048 static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
2049 bool inner, u8 *bit_mask)
2051 struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2054 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
2056 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
2059 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
2061 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
/* Build the ETHL4_MISC STE tag: copy the TCP seq/ack numbers (inner or
 * outer header, per the builder's inner flag) into the tag and clear the
 * consumed misc3 spec fields.
 */
2066 static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
2067 struct mlx5dr_ste_build *sb,
2070 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2071 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2072 u8 *tag = hw_ste->tag;
/* inner headers */
2075 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
2076 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
/* outer headers */
2078 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
2079 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
/* Initialize an STE builder for ETHL4_MISC (TCP seq/ack) lookups.
 * The lookup type is computed per rx/inner, unlike the fixed-type builders.
 */
2085 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
2086 struct mlx5dr_match_param *mask,
2087 bool inner, bool rx)
2089 dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
2093 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
2094 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2095 sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
/* Build the FLEX_PARSER_TNL bit mask for VXLAN-GPE matching.
 * Header DWORD 63:32 packs the GPE flags into bits 31:24 and the
 * next-protocol into bits 7:0; DWORD 31:0 holds the VNI in bits 31:8.
 * Consumed mask fields are zeroed so they are not matched again.
 */
2098 static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
2099 bool inner, u8 *bit_mask)
2101 struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2103 if (misc_3_mask->outer_vxlan_gpe_flags ||
2104 misc_3_mask->outer_vxlan_gpe_next_protocol) {
2105 MLX5_SET(ste_flex_parser_tnl, bit_mask,
2106 flex_parser_tunneling_header_63_32,
2107 (misc_3_mask->outer_vxlan_gpe_flags << 24) |
2108 (misc_3_mask->outer_vxlan_gpe_next_protocol));
2109 misc_3_mask->outer_vxlan_gpe_flags = 0;
2110 misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
2113 if (misc_3_mask->outer_vxlan_gpe_vni) {
2114 MLX5_SET(ste_flex_parser_tnl, bit_mask,
2115 flex_parser_tunneling_header_31_0,
2116 misc_3_mask->outer_vxlan_gpe_vni << 8);
2117 misc_3_mask->outer_vxlan_gpe_vni = 0;
/* Build the FLEX_PARSER_TNL STE tag for VXLAN-GPE.
 * Mirrors the bit-mask builder: flags/next-protocol packed into header
 * DWORD 63:32, VNI shifted into DWORD 31:0; consumed spec fields cleared.
 */
2121 static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
2122 struct mlx5dr_ste_build *sb,
2125 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2126 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2127 u8 *tag = hw_ste->tag;
2129 if (misc3->outer_vxlan_gpe_flags ||
2130 misc3->outer_vxlan_gpe_next_protocol) {
2131 MLX5_SET(ste_flex_parser_tnl, tag,
2132 flex_parser_tunneling_header_63_32,
2133 (misc3->outer_vxlan_gpe_flags << 24) |
2134 (misc3->outer_vxlan_gpe_next_protocol));
2135 misc3->outer_vxlan_gpe_flags = 0;
2136 misc3->outer_vxlan_gpe_next_protocol = 0;
2139 if (misc3->outer_vxlan_gpe_vni) {
2140 MLX5_SET(ste_flex_parser_tnl, tag,
2141 flex_parser_tunneling_header_31_0,
2142 misc3->outer_vxlan_gpe_vni << 8);
2143 misc3->outer_vxlan_gpe_vni = 0;
/* Initialize an STE builder for FLEX_PARSER_TNL_HEADER (VXLAN-GPE)
 * lookups: build the bit mask, then set lookup type, byte mask and the
 * tag callback.
 */
2149 void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
2150 struct mlx5dr_match_param *mask,
2151 bool inner, bool rx)
2153 dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
2157 sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2158 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2159 sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
/* Build the STEERING_REGISTERS_0 bit mask: metadata registers c_0..c_3
 * map to the high/low halves of HW registers 0 and 1. DR_STE_SET_MASK_V
 * also clears each consumed misc2 mask field.
 */
2162 static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
2165 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2167 DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
2168 misc_2_mask, metadata_reg_c_0);
2169 DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
2170 misc_2_mask, metadata_reg_c_1);
2171 DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
2172 misc_2_mask, metadata_reg_c_2);
2173 DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
2174 misc_2_mask, metadata_reg_c_3);
/* Build the STEERING_REGISTERS_0 STE tag: copy metadata_reg_c_0..c_3
 * into register 0/1 high/low halves; consumed spec fields are cleared.
 */
2177 static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
2178 struct mlx5dr_ste_build *sb,
2181 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2182 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
2183 u8 *tag = hw_ste->tag;
2185 DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
2186 DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
2187 DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
2188 DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
/* Initialize an STE builder for STEERING_REGISTERS_0 (metadata c_0..c_3)
 * lookups: build the bit mask, then set lookup type, byte mask and the
 * tag callback.
 */
2193 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
2194 struct mlx5dr_match_param *mask,
2195 bool inner, bool rx)
2197 dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
2201 sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
2202 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2203 sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
/* Build the STEERING_REGISTERS_1 bit mask: metadata registers c_4..c_7
 * map to the high/low halves of HW registers 2 and 3. DR_STE_SET_MASK_V
 * also clears each consumed misc2 mask field.
 */
2206 static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
2209 struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2211 DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
2212 misc_2_mask, metadata_reg_c_4);
2213 DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
2214 misc_2_mask, metadata_reg_c_5);
2215 DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
2216 misc_2_mask, metadata_reg_c_6);
2217 DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
2218 misc_2_mask, metadata_reg_c_7);
/* Build the STEERING_REGISTERS_1 STE tag: copy metadata_reg_c_4..c_7
 * into register 2/3 high/low halves; consumed spec fields are cleared.
 */
2221 static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
2222 struct mlx5dr_ste_build *sb,
2225 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2226 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
2227 u8 *tag = hw_ste->tag;
2229 DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
2230 DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
2231 DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
2232 DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
/* Initialize an STE builder for STEERING_REGISTERS_1 (metadata c_4..c_7)
 * lookups: build the bit mask, then set lookup type, byte mask and the
 * tag callback.
 */
2237 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
2238 struct mlx5dr_match_param *mask,
2239 bool inner, bool rx)
2241 dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
2245 sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
2246 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2247 sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
/* Build the SRC_GVMI_AND_QP bit mask.
 * source_port and source_eswitch_owner_vhca_id only support full masks
 * (0 or 0xffff) — a partial mask is rejected (int return; the error path
 * lines are not visible in this chunk). The vhca_id mask field is
 * consumed here (zeroed) even though it is not written into the STE mask.
 */
2250 static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
2253 struct mlx5dr_match_misc *misc_mask = &value->misc;
2255 /* Partial misc source_port is not supported */
2256 if (misc_mask->source_port && misc_mask->source_port != 0xffff)
2259 /* Partial misc source_eswitch_owner_vhca_id is not supported */
2260 if (misc_mask->source_eswitch_owner_vhca_id &&
2261 misc_mask->source_eswitch_owner_vhca_id != 0xffff)
2264 DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
2265 DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
2266 misc_mask->source_eswitch_owner_vhca_id = 0;
/* Build the SRC_GVMI_AND_QP STE tag.
 *
 * source_sqn is copied directly into source_qp. The source GVMI is
 * resolved from the vport caps: when a source_eswitch_owner_vhca_id was
 * masked (sb->vhca_id_valid), the id selects between this domain's caps
 * and the peer domain's caps; otherwise the local domain caps are used.
 * The consumed misc spec fields are cleared at the end.
 *
 * NOTE(review): the error paths (unknown vhca id, vport lookup failure)
 * are not visible in this chunk.
 */
2271 static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
2272 struct mlx5dr_ste_build *sb,
2275 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2276 struct mlx5dr_match_misc *misc = &value->misc;
2277 struct mlx5dr_cmd_vport_cap *vport_cap;
2278 struct mlx5dr_domain *dmn = sb->dmn;
2279 struct mlx5dr_cmd_caps *caps;
2280 u8 *tag = hw_ste->tag;
2282 DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
2284 if (sb->vhca_id_valid) {
2285 /* Find port GVMI based on the eswitch_owner_vhca_id */
2286 if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
2287 caps = &dmn->info.caps;
2288 else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
2289 dmn->peer_dmn->info.caps.gvmi))
2290 caps = &dmn->peer_dmn->info.caps;
/* No vhca id in the mask: always resolve against the local domain */
2294 caps = &dmn->info.caps;
2297 vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
/* A zero vport_gvmi is left unset in the tag */
2301 if (vport_cap->vport_gvmi)
2302 MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
2304 misc->source_eswitch_owner_vhca_id = 0;
2305 misc->source_port = 0;
/* Initialize an STE builder for SRC_GVMI_AND_QP lookups.
 * vhca_id_valid must be latched from the mask BEFORE the bit-mask
 * builder runs, because that builder zeroes
 * mask->misc.source_eswitch_owner_vhca_id as it consumes it.
 * Returns the bit-mask builder's error on a partial/unsupported mask.
 */
2310 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
2311 struct mlx5dr_match_param *mask,
2312 struct mlx5dr_domain *dmn,
2313 bool inner, bool rx)
2317 /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
2318 sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
2320 ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
2327 sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
2328 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2329 sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;