1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
/* Upper bound on STEs a single rule chain can occupy:
 * the match STEs plus any extra STEs needed by the actions.
 */
6 #define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
/* Links one mlx5dr_action to a rule; instances live on
 * rule->rule_actions_list and pin the action's refcount.
 */
8 struct mlx5dr_rule_action_member {
9 struct mlx5dr_action *action;
10 struct list_head list;
/* Append @new_last_ste to the tail of @miss_list and repoint the previous
 * tail's miss address at it. The HW rewrite of the old tail (CTRL-sized)
 * is queued on @send_list, not written immediately.
 * NOTE(review): alloc-failure check and return paths are elided in this
 * excerpt.
 */
13 static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
14 struct mlx5dr_ste *new_last_ste,
15 struct list_head *miss_list,
16 struct list_head *send_list)
18 struct mlx5dr_ste_send_info *ste_info_last;
19 struct mlx5dr_ste *last_ste;
21 /* The new entry will be inserted after the last */
22 last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
25 ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
/* Old tail now misses to the new tail's ICM address */
29 mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
30 mlx5dr_ste_get_icm_addr(new_last_ste));
31 list_add_tail(&new_last_ste->miss_list_node, miss_list);
/* Queue only the control part of the old tail for HW update */
33 mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
35 ste_info_last, send_list, true);
/* Allocate a one-entry "collision" hash table used as a miss target.
 * Its single STE is returned; @hw_ste's miss address is patched to the
 * matcher's end anchor. The table never grows.
 * NOTE(review): alloc-failure branch body and return are elided here.
 */
40 static struct mlx5dr_ste *
41 dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
42 struct mlx5dr_matcher_rx_tx *nic_matcher,
45 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
46 struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
47 struct mlx5dr_ste_htbl *new_htbl;
48 struct mlx5dr_ste *ste;
50 /* Create new table for miss entry */
51 new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
53 MLX5DR_STE_LU_TYPE_DONT_CARE,
56 mlx5dr_dbg(dmn, "Failed allocating collision table\n");
60 /* One and only entry, never grows */
61 ste = new_htbl->ste_arr;
62 mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
63 nic_matcher->e_anchor->chunk->icm_addr);
/* Take a reference for the STE now held by the caller */
64 mlx5dr_htbl_get(new_htbl);
/* Build a collision STE that shadows @orig_ste: same chain location,
 * same shared miss list. Also creates the next-level hash table for it;
 * the STE is freed on that failure path.
 */
69 static struct mlx5dr_ste *
70 dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
71 struct mlx5dr_matcher_rx_tx *nic_matcher,
73 struct mlx5dr_ste *orig_ste)
75 struct mlx5dr_ste *ste;
77 ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
79 mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
83 ste->ste_chain_location = orig_ste->ste_chain_location;
85 /* In collision entry, all members share the same miss_list_head */
86 ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
/* Next-level table allocation failure unwinds the new STE below */
89 if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
91 mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
98 mlx5dr_ste_free(ste, matcher, nic_matcher);
/* Flush one queued ste_info to HW: unlink it from the send list, refresh
 * the cached hw_ste copy (control-only or reduced-size, depending on the
 * queued size), then post the write to the device.
 */
103 dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
104 struct mlx5dr_domain *dmn)
108 list_del(&ste_info->send_list);
110 /* Copy data to ste, only reduced size or control, the last 16B (mask)
111 * is already written to the hw.
113 if (ste_info->size == DR_STE_SIZE_CTRL)
114 memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
116 memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
118 ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
119 ste_info->size, ste_info->offset);
/* Drain @send_ste_list to HW. Two traversal orders exist (reverse vs
 * forward) — presumably selected by the third parameter, whose
 * declaration is elided in this excerpt; ordering matters so that origin
 * data lands before miss addresses that reference it.
 */
128 static int dr_rule_send_update_list(struct list_head *send_ste_list,
129 struct mlx5dr_domain *dmn,
132 struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
136 list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
137 send_ste_list, send_list) {
138 ret = dr_rule_handle_one_ste_in_update_list(ste_info,
144 list_for_each_entry_safe(ste_info, tmp_ste_info,
145 send_ste_list, send_list) {
146 ret = dr_rule_handle_one_ste_in_update_list(ste_info,
/* Linear scan of @miss_list for an STE whose tag equals @hw_ste's.
 * Returns the match (return statements elided in this excerpt); an
 * empty list short-circuits.
 */
156 static struct mlx5dr_ste *
157 dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
159 struct mlx5dr_ste *ste;
161 if (list_empty(miss_list))
164 /* Check if hw_ste is present in the list */
165 list_for_each_entry(ste, miss_list, miss_list_node) {
166 if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
/* During rehash: an index in the new table is already taken, so create a
 * collision STE chained onto @col_ste's shared miss list and queue the
 * miss-address update on @update_list. Frees the new STE if the append
 * fails.
 */
173 static struct mlx5dr_ste *
174 dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
175 struct mlx5dr_matcher_rx_tx *nic_matcher,
176 struct list_head *update_list,
177 struct mlx5dr_ste *col_ste,
180 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
181 struct mlx5dr_ste *new_ste;
184 new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
188 /* In collision entry, all members share the same miss_list_head */
189 new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
191 /* Update the previous from the list */
192 ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
193 mlx5dr_ste_get_miss_list(col_ste),
196 mlx5dr_dbg(dmn, "Failed update dup entry\n");
203 mlx5dr_ste_free(new_ste, matcher, nic_matcher);
/* Transfer per-STE bookkeeping from @cur_ste to its relocated copy
 * @new_ste: next table link, chain position, back-pointer from the next
 * table, refcount, and the attached rule-member list.
 */
207 static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
208 struct mlx5dr_matcher_rx_tx *nic_matcher,
209 struct mlx5dr_ste *cur_ste,
210 struct mlx5dr_ste *new_ste)
212 new_ste->next_htbl = cur_ste->next_htbl;
213 new_ste->ste_chain_location = cur_ste->ste_chain_location;
/* Non-last STEs own a next table; fix its back-pointer to us */
215 if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
216 new_ste->next_htbl->pointing_ste = new_ste;
218 /* We need to copy the refcount since this ste
219 * may have been traversed several times
221 new_ste->refcount = cur_ste->refcount;
223 /* Link old STEs rule_mem list to the new ste */
224 mlx5dr_rule_update_rule_member(cur_ste, new_ste);
225 INIT_LIST_HEAD(&new_ste->rule_list);
226 list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
/* Copy one STE from the old table into @new_htbl during rehash. Rebuilds
 * the hw_ste (matcher mask + old control/tag, miss to end anchor),
 * hashes it into the new table; free slot -> plain placement, occupied
 * slot -> collision path whose write goes through @update_list.
 * NOTE(review): several error/cleanup lines are elided in this excerpt.
 */
229 static struct mlx5dr_ste *
230 dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
231 struct mlx5dr_matcher_rx_tx *nic_matcher,
232 struct mlx5dr_ste *cur_ste,
233 struct mlx5dr_ste_htbl *new_htbl,
234 struct list_head *update_list)
236 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
237 struct mlx5dr_ste_send_info *ste_info;
238 bool use_update_list = false;
239 u8 hw_ste[DR_STE_SIZE] = {};
240 struct mlx5dr_ste *new_ste;
244 /* Copy STE mask from the matcher */
245 sb_idx = cur_ste->ste_chain_location - 1;
246 mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
248 /* Copy STE control and tag */
249 memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
250 mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
251 nic_matcher->e_anchor->chunk->icm_addr);
253 new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
254 new_ste = &new_htbl->ste_arr[new_idx];
/* Free slot: this STE starts a fresh miss list at the new index */
256 if (mlx5dr_ste_is_not_used(new_ste)) {
257 mlx5dr_htbl_get(new_htbl);
258 list_add_tail(&new_ste->miss_list_node,
259 mlx5dr_ste_get_miss_list(new_ste));
/* Occupied slot: chain as a collision entry instead */
261 new_ste = dr_rule_rehash_handle_collision(matcher,
267 mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
271 new_htbl->ctrl.num_of_collisions++;
272 use_update_list = true;
275 memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
277 new_htbl->ctrl.num_of_valid_entries++;
/* Collision entries must be written via the ordered update list */
279 if (use_update_list) {
280 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
284 mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
289 dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);
294 mlx5dr_ste_free(new_ste, matcher, nic_matcher);
/* Move every STE on @cur_miss_list into @new_htbl via
 * dr_rule_rehash_copy_ste, dropping each old entry and its table ref as
 * it is copied. A failure mid-list is unrecoverable (fatal error log).
 */
298 static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
299 struct mlx5dr_matcher_rx_tx *nic_matcher,
300 struct list_head *cur_miss_list,
301 struct mlx5dr_ste_htbl *new_htbl,
302 struct list_head *update_list)
304 struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
306 if (list_empty(cur_miss_list))
309 list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
310 new_ste = dr_rule_rehash_copy_ste(matcher,
/* Old entry is fully relocated: unlink and release its table ref */
318 list_del(&cur_ste->miss_list_node);
319 mlx5dr_htbl_put(cur_ste->htbl);
324 mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
/* Walk every slot of @cur_htbl and copy each in-use slot's miss list
 * into @new_htbl. Rejects a table whose chunk size maps to fewer than
 * one entry.
 */
329 static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
330 struct mlx5dr_matcher_rx_tx *nic_matcher,
331 struct mlx5dr_ste_htbl *cur_htbl,
332 struct mlx5dr_ste_htbl *new_htbl,
333 struct list_head *update_list)
335 struct mlx5dr_ste *cur_ste;
340 cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
342 if (cur_entries < 1) {
343 mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
347 for (i = 0; i < cur_entries; i++) {
348 cur_ste = &cur_htbl->ste_arr[i];
349 if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
352 err = dr_rule_rehash_copy_miss_list(matcher,
354 mlx5dr_ste_get_miss_list(cur_ste),
/* Replace @cur_htbl with a larger table of @new_size: allocate, copy all
 * entries (collecting HW writes on a local send list), write the new
 * table to HW, flush the send list in order, then rewire the previous
 * STE (or the matcher anchor when ste_location == 1) to hit the new
 * table. Error paths free queued ste_infos and the new table.
 * NOTE(review): multiple intermediate lines (checks, returns, labels)
 * are elided in this excerpt.
 */
365 static struct mlx5dr_ste_htbl *
366 dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
367 struct mlx5dr_rule_rx_tx *nic_rule,
368 struct mlx5dr_ste_htbl *cur_htbl,
370 struct list_head *update_list,
371 enum mlx5dr_icm_chunk_size new_size)
373 struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
374 struct mlx5dr_matcher *matcher = rule->matcher;
375 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
376 struct mlx5dr_matcher_rx_tx *nic_matcher;
377 struct mlx5dr_ste_send_info *ste_info;
378 struct mlx5dr_htbl_connect_info info;
379 struct mlx5dr_domain_rx_tx *nic_dmn;
380 u8 formatted_ste[DR_STE_SIZE] = {};
381 LIST_HEAD(rehash_table_send_list);
382 struct mlx5dr_ste *ste_to_update;
383 struct mlx5dr_ste_htbl *new_htbl;
386 nic_matcher = nic_rule->nic_matcher;
387 nic_dmn = nic_matcher->nic_tbl->nic_dmn;
389 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
393 new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
396 cur_htbl->byte_mask);
398 mlx5dr_err(dmn, "Failed to allocate new hash table\n");
402 /* Write new table to HW */
403 info.type = CONNECT_MISS;
404 info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
405 mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
/* New table inherits the pointer chain of the table it replaces */
412 new_htbl->pointing_ste = cur_htbl->pointing_ste;
413 new_htbl->pointing_ste->next_htbl = new_htbl;
414 err = dr_rule_rehash_copy_htbl(matcher,
418 &rehash_table_send_list);
422 if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
423 nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
424 mlx5dr_err(dmn, "Failed writing table to HW\n");
428 /* Writing to the hw is done in regular order of rehash_table_send_list,
429 * in order to have the origin data written before the miss address of
430 * collision entries, if exists.
432 if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
433 mlx5dr_err(dmn, "Failed updating table to HW\n");
437 /* Connect previous hash table to current */
438 if (ste_location == 1) {
439 /* The previous table is an anchor, anchors size is always one STE */
440 struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;
442 /* On matcher s_anchor we keep an extra refcount */
443 mlx5dr_htbl_get(new_htbl);
444 mlx5dr_htbl_put(cur_htbl);
446 nic_matcher->s_htbl = new_htbl;
448 /* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
449 * (48B len) which works only on first 32B
451 mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
452 prev_htbl->ste_arr[0].hw_ste,
453 new_htbl->chunk->icm_addr,
454 new_htbl->chunk->num_of_entries);
456 ste_to_update = &prev_htbl->ste_arr[0];
/* Non-first location: redirect the pointing STE's hit address */
458 mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
459 cur_htbl->pointing_ste->hw_ste,
461 ste_to_update = cur_htbl->pointing_ste;
464 mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
465 0, ste_to_update->hw_ste, ste_info,
471 /* Clean all ste_info's from the new table */
472 list_for_each_entry_safe(del_ste_info, tmp_ste_info,
473 &rehash_table_send_list, send_list) {
474 list_del(&del_ste_info->send_list);
479 mlx5dr_ste_htbl_free(new_htbl);
482 mlx5dr_info(dmn, "Failed creating rehash table\n");
/* Grow @cur_htbl one chunk size, clamped to the domain's max SW ICM
 * size. Returns NULL when already at the cap (caller treats that as
 * "skip rehash"), otherwise delegates to dr_rule_rehash_htbl.
 */
486 static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
487 struct mlx5dr_rule_rx_tx *nic_rule,
488 struct mlx5dr_ste_htbl *cur_htbl,
490 struct list_head *update_list)
492 struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
493 enum mlx5dr_icm_chunk_size new_size;
495 new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
496 new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
498 if (new_size == cur_htbl->chunk_size)
499 return NULL; /* Skip rehash, we already at the max size */
501 return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
502 update_list, new_size);
505 static struct mlx5dr_ste *
506 dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
507 struct mlx5dr_matcher_rx_tx *nic_matcher,
508 struct mlx5dr_ste *ste,
510 struct list_head *miss_list,
511 struct list_head *send_list)
513 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
514 struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
515 struct mlx5dr_ste_send_info *ste_info;
516 struct mlx5dr_ste *new_ste;
518 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
522 new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
526 if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
527 miss_list, send_list)) {
528 mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
532 mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
533 ste_info, send_list, false);
535 ste->htbl->ctrl.num_of_collisions++;
536 ste->htbl->ctrl.num_of_valid_entries++;
541 mlx5dr_ste_free(new_ste, matcher, nic_matcher);
/* Detach all action members from the rule and drop the action refcounts
 * taken in dr_rule_add_action_members. The kfree of each member is
 * elided in this excerpt.
 */
547 static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
549 struct mlx5dr_rule_action_member *action_mem;
550 struct mlx5dr_rule_action_member *tmp;
552 list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
553 list_del(&action_mem->list);
554 refcount_dec(&action_mem->action->refcount);
/* Record each action in @actions on rule->rule_actions_list, taking a
 * reference per action. On allocation failure, already-added members
 * are unwound via dr_rule_remove_action_members.
 */
559 static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
561 struct mlx5dr_action *actions[])
563 struct mlx5dr_rule_action_member *action_mem;
566 for (i = 0; i < num_actions; i++) {
567 action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
569 goto free_action_members;
571 action_mem->action = actions[i];
572 INIT_LIST_HEAD(&action_mem->list);
573 list_add_tail(&action_mem->list, &rule->rule_actions_list);
574 refcount_inc(&action_mem->action->refcount);
580 dr_rule_remove_action_members(rule);
584 /* While the pointer of ste is no longer valid, like while moving ste to be
585 * the first in the miss_list, and to be in the origin table,
586 * all rule-members that are attached to this ste should update their ste member
589 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
590 struct mlx5dr_ste *new_ste)
592 struct mlx5dr_rule_member *rule_mem;
/* Redirect every rule member referencing the old STE to @new_ste */
594 list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
595 rule_mem->ste = new_ste;
/* Tear down a nic_rule's member list: unlink each member from both the
 * rule list and its STE's use list, then release the STE reference.
 */
598 static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
599 struct mlx5dr_rule_rx_tx *nic_rule)
601 struct mlx5dr_rule_member *rule_mem;
602 struct mlx5dr_rule_member *tmp_mem;
604 if (list_empty(&nic_rule->rule_members_list))
606 list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
607 list_del(&rule_mem->list);
608 list_del(&rule_mem->use_ste_list);
609 mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
/* Population count of @byte_mask: each iteration clears the lowest set
 * bit (Kernighan's method). Loop header and counter lines are elided in
 * this excerpt.
 */
614 static u16 dr_get_bits_per_mask(u16 byte_mask)
619 byte_mask = byte_mask & (byte_mask - 1);
/* Decide whether @htbl should be grown: not if already at the max ICM
 * size, not if the mask can't address more entries than the current
 * size, yes when both collisions and non-colliding valid entries exceed
 * the increase threshold.
 */
626 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
627 struct mlx5dr_domain *dmn,
628 struct mlx5dr_domain_rx_tx *nic_dmn)
630 struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
632 if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
638 if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
641 if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
642 (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
/* Allocate a rule member binding @ste to @nic_rule: linked onto the
 * rule's member list and onto the STE's rule_list so either side can
 * find the other.
 */
648 static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
649 struct mlx5dr_ste *ste)
651 struct mlx5dr_rule_member *rule_mem;
653 rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
657 INIT_LIST_HEAD(&rule_mem->list);
658 INIT_LIST_HEAD(&rule_mem->use_ste_list);
661 list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
663 list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
/* Attach extra STEs needed by actions beyond the match builders. For
 * each action STE past num_of_builders: create a one-entry table, link
 * the previous STE's hit address to it, register it as a rule member,
 * and queue its full write. No-op when the builder count already covers
 * new_hw_ste_arr_sz. Unwind path frees ste_infos and puts action STEs.
 */
668 static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
669 struct mlx5dr_rule_rx_tx *nic_rule,
670 struct list_head *send_ste_list,
671 struct mlx5dr_ste *last_ste,
673 u32 new_hw_ste_arr_sz)
675 struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
676 struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
677 u8 num_of_builders = nic_matcher->num_of_builders;
678 struct mlx5dr_matcher *matcher = rule->matcher;
679 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
680 u8 *curr_hw_ste, *prev_hw_ste;
681 struct mlx5dr_ste *action_ste;
685 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
686 * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
687 * to support the action.
689 if (num_of_builders == new_hw_ste_arr_sz)
692 for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
693 curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
694 prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
695 action_ste = dr_rule_create_collision_htbl(matcher,
701 mlx5dr_ste_get(action_ste);
703 /* While free ste we go over the miss list, so add this ste to the list */
704 list_add_tail(&action_ste->miss_list_node,
705 mlx5dr_ste_get_miss_list(action_ste));
707 ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
709 if (!ste_info_arr[k])
712 /* Point current ste to the new action */
713 mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
716 ret = dr_rule_add_member(nic_rule, action_ste);
718 mlx5dr_dbg(dmn, "Failed adding rule member\n");
721 mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
724 send_ste_list, false);
730 kfree(ste_info_arr[k]);
732 mlx5dr_ste_put(action_ste, matcher, nic_matcher);
/* Populate a previously-unused hash slot: take a table ref, start a new
 * miss list with @ste, point its miss to the end anchor, create the
 * next-level table, bump the valid-entry counter, and queue the full
 * write. Failure unlinks the STE and drops the table ref.
 */
736 static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
737 struct mlx5dr_matcher_rx_tx *nic_matcher,
738 struct mlx5dr_ste_htbl *cur_htbl,
739 struct mlx5dr_ste *ste,
742 struct list_head *miss_list,
743 struct list_head *send_list)
745 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
746 struct mlx5dr_ste_send_info *ste_info;
748 /* Take ref on table, only on first time this ste is used */
749 mlx5dr_htbl_get(cur_htbl);
751 /* new entry -> new branch */
752 list_add_tail(&ste->miss_list_node, miss_list);
754 mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
755 nic_matcher->e_anchor->chunk->icm_addr);
757 ste->ste_chain_location = ste_location;
759 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
761 goto clean_ste_setting;
763 if (mlx5dr_ste_create_next_htbl(matcher,
768 mlx5dr_dbg(dmn, "Failed allocating table\n");
772 cur_htbl->ctrl.num_of_valid_entries++;
774 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
775 ste_info, send_list, false);
/* Error unwind: undo list insertion and the table reference */
782 list_del_init(&ste->miss_list_node);
783 mlx5dr_htbl_put(cur_htbl);
788 static struct mlx5dr_ste *
789 dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
790 struct mlx5dr_rule_rx_tx *nic_rule,
791 struct list_head *send_ste_list,
792 struct mlx5dr_ste_htbl *cur_htbl,
795 struct mlx5dr_ste_htbl **put_htbl)
797 struct mlx5dr_matcher *matcher = rule->matcher;
798 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
799 struct mlx5dr_matcher_rx_tx *nic_matcher;
800 struct mlx5dr_domain_rx_tx *nic_dmn;
801 struct mlx5dr_ste_htbl *new_htbl;
802 struct mlx5dr_ste *matched_ste;
803 struct list_head *miss_list;
804 bool skip_rehash = false;
805 struct mlx5dr_ste *ste;
808 nic_matcher = nic_rule->nic_matcher;
809 nic_dmn = nic_matcher->nic_tbl->nic_dmn;
812 index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
813 miss_list = &cur_htbl->chunk->miss_list[index];
814 ste = &cur_htbl->ste_arr[index];
816 if (mlx5dr_ste_is_not_used(ste)) {
817 if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
823 /* Hash table index in use, check if this ste is in the miss list */
824 matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
826 /* If it is last STE in the chain, and has the same tag
827 * it means that all the previous stes are the same,
828 * if so, this rule is duplicated.
830 if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
833 mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
836 if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
837 /* Hash table index in use, try to resize of the hash */
840 /* Hold the table till we update.
841 * Release in dr_rule_create_rule()
843 *put_htbl = cur_htbl;
844 mlx5dr_htbl_get(cur_htbl);
846 new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
847 ste_location, send_ste_list);
849 mlx5dr_htbl_put(cur_htbl);
850 mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
851 cur_htbl->chunk_size);
857 /* Hash table index in use, add another collision (miss) */
858 ste = dr_rule_handle_collision(matcher,
865 mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
/* Verify that @value sets no bit outside @mask within bytes
 * [s_idx, e_idx); logs and (presumably) returns false on the first
 * violation — the return lines are elided in this excerpt.
 */
874 static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
875 u32 s_idx, u32 e_idx)
879 for (i = s_idx; i < e_idx; i++) {
880 if (value[i] & ~mask[i]) {
881 pr_info("Rule parameters contains a value not specified by mask\n");
/* Validate user match @value against the matcher's mask: size must be
 * within DR_SZ_MATCH_PARAM and u32-aligned, then each criteria section
 * (outer/misc/inner/misc2/misc3/misc4) is checked for bits set outside
 * the mask. Also fills @param via mlx5dr_ste_copy_param.
 */
888 static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
889 struct mlx5dr_match_parameters *value,
890 struct mlx5dr_match_param *param)
892 u8 match_criteria = matcher->match_criteria;
893 size_t value_size = value->match_sz;
894 u8 *mask_p = (u8 *)&matcher->mask;
895 u8 *param_p = (u8 *)param;
899 (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
900 mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
904 mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
906 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
907 s_idx = offsetof(struct mlx5dr_match_param, outer);
908 e_idx = min(s_idx + sizeof(param->outer), value_size);
910 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
911 mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
916 if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
917 s_idx = offsetof(struct mlx5dr_match_param, misc);
918 e_idx = min(s_idx + sizeof(param->misc), value_size);
920 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
921 mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
926 if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
927 s_idx = offsetof(struct mlx5dr_match_param, inner);
928 e_idx = min(s_idx + sizeof(param->inner), value_size);
930 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
931 mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
936 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
937 s_idx = offsetof(struct mlx5dr_match_param, misc2);
938 e_idx = min(s_idx + sizeof(param->misc2), value_size);
940 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
941 mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
946 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
947 s_idx = offsetof(struct mlx5dr_match_param, misc3);
948 e_idx = min(s_idx + sizeof(param->misc3), value_size);
950 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
951 mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
956 if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
957 s_idx = offsetof(struct mlx5dr_match_param, misc4);
958 e_idx = min(s_idx + sizeof(param->misc4), value_size);
960 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
961 mlx5dr_err(matcher->tbl->dmn,
962 "Rule misc4 parameters contains a value not specified by mask\n");
/* Destroy one direction (RX or TX) of a rule: clean its STE members
 * under the per-nic domain lock.
 */
969 static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
970 struct mlx5dr_rule_rx_tx *nic_rule)
972 mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
973 dr_rule_clean_rule_members(rule, nic_rule);
974 mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
/* FDB rules span both directions; tear down RX then TX */
979 static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
981 dr_rule_destroy_rule_nic(rule, &rule->rx);
982 dr_rule_destroy_rule_nic(rule, &rule->tx);
/* Dispatch rule teardown by domain type (NIC RX/TX or FDB), then drop
 * the action references. Break statements and the final kfree are
 * elided in this excerpt.
 */
986 static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
988 struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
991 case MLX5DR_DOMAIN_TYPE_NIC_RX:
992 dr_rule_destroy_rule_nic(rule, &rule->rx);
994 case MLX5DR_DOMAIN_TYPE_NIC_TX:
995 dr_rule_destroy_rule_nic(rule, &rule->tx);
997 case MLX5DR_DOMAIN_TYPE_FDB:
998 dr_rule_destroy_rule_fdb(rule);
1004 dr_rule_remove_action_members(rule);
/* Classify a match spec as IPv6 (ip_version == 6 or IPv6 ethertype),
 * else IPv4.
 */
1009 static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
1011 if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
1012 return DR_RULE_IPV6;
1014 return DR_RULE_IPV4;
/* Decide whether this direction's insertion can be skipped (FDB only):
 * based on the matched source_port vs the wire port, or on the flow
 * source hint (local vport traffic never hits RX, uplink-sourced never
 * hits TX). Non-FDB domains never skip.
 */
1017 static bool dr_rule_skip(enum mlx5dr_domain_type domain,
1018 enum mlx5dr_ste_entry_type ste_type,
1019 struct mlx5dr_match_param *mask,
1020 struct mlx5dr_match_param *value,
1023 bool rx = ste_type == MLX5DR_STE_TYPE_RX;
1025 if (domain != MLX5DR_DOMAIN_TYPE_FDB)
1028 if (mask->misc.source_port) {
1029 if (rx && value->misc.source_port != WIRE_PORT)
1032 if (!rx && value->misc.source_port == WIRE_PORT)
1036 if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
1039 if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
/* Create one direction of a rule: select builders by IP version, build
 * the hw_ste array (match tags then actions), walk the chain inserting
 * each STE via dr_rule_handle_ste_branch, attach action STEs, and flush
 * all queued HW writes — all under the per-nic domain lock. The error
 * path puts the last STE, cleans members, and frees queued ste_infos.
 * NOTE(review): several checks/labels are elided in this excerpt.
 */
1046 dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
1047 struct mlx5dr_rule_rx_tx *nic_rule,
1048 struct mlx5dr_match_param *param,
1050 struct mlx5dr_action *actions[])
1052 struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
1053 struct mlx5dr_matcher *matcher = rule->matcher;
1054 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1055 struct mlx5dr_matcher_rx_tx *nic_matcher;
1056 struct mlx5dr_domain_rx_tx *nic_dmn;
1057 struct mlx5dr_ste_htbl *htbl = NULL;
1058 struct mlx5dr_ste_htbl *cur_htbl;
1059 struct mlx5dr_ste *ste = NULL;
1060 LIST_HEAD(send_ste_list);
1061 u8 *hw_ste_arr = NULL;
1062 u32 new_hw_ste_arr_sz;
1065 nic_matcher = nic_rule->nic_matcher;
1066 nic_dmn = nic_matcher->nic_tbl->nic_dmn;
1068 INIT_LIST_HEAD(&nic_rule->rule_members_list);
/* Direction may be skipped entirely for FDB rules */
1070 if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
1074 hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
1078 mlx5dr_domain_nic_lock(nic_dmn);
1080 ret = mlx5dr_matcher_select_builders(matcher,
1082 dr_rule_get_ipv(&param->outer),
1083 dr_rule_get_ipv(&param->inner));
1087 /* Set the tag values inside the ste array */
1088 ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
1092 /* Set the actions values/addresses inside the ste array */
1093 ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
1094 num_actions, hw_ste_arr,
1095 &new_hw_ste_arr_sz);
1099 cur_htbl = nic_matcher->s_htbl;
1101 /* Go over the array of STEs, and build dr_ste accordingly.
1102 * The loop is over only the builders which are equal or less to the
1103 * number of stes, in case we have actions that lives in other stes.
1105 for (i = 0; i < nic_matcher->num_of_builders; i++) {
1106 /* Calculate CRC and keep new ste entry */
1107 u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
1109 ste = dr_rule_handle_ste_branch(rule,
1117 mlx5dr_err(dmn, "Failed creating next branch\n");
1122 cur_htbl = ste->next_htbl;
1124 /* Keep all STEs in the rule struct */
1125 ret = dr_rule_add_member(nic_rule, ste);
1127 mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
1131 mlx5dr_ste_get(ste);
1134 /* Connect actions */
1135 ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
1136 ste, hw_ste_arr, new_hw_ste_arr_sz);
1138 mlx5dr_dbg(dmn, "Failed apply actions\n");
1141 ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
1143 mlx5dr_err(dmn, "Failed sending ste!\n");
/* Release the table held for us by dr_rule_handle_ste_branch */
1148 mlx5dr_htbl_put(htbl);
1150 mlx5dr_domain_nic_unlock(nic_dmn);
/* Error unwind: drop last STE, members, and all queued writes */
1157 mlx5dr_ste_put(ste, matcher, nic_matcher);
1159 dr_rule_clean_rule_members(rule, nic_rule);
1160 /* Clean all ste_info's */
1161 list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
1162 list_del(&ste_info->send_list);
1166 mlx5dr_domain_nic_unlock(nic_dmn);
/* FDB rule = RX insertion + TX insertion. The match param is copied
 * first because the RX insertion consumes it; TX uses the copy. A TX
 * failure unwinds the already-created RX half.
 */
1172 dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
1173 struct mlx5dr_match_param *param,
1175 struct mlx5dr_action *actions[])
1177 struct mlx5dr_match_param copy_param = {};
1180 /* Copy match_param since they will be consumed during the first
1181 * nic_rule insertion.
1183 memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
1185 ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
1186 num_actions, actions);
1190 ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
1191 num_actions, actions);
1193 goto destroy_rule_nic_rx;
1197 destroy_rule_nic_rx:
1198 dr_rule_destroy_rule_nic(rule, &rule->rx);
/* Top-level rule creation: verify match params against the matcher
 * mask, allocate the rule, record its actions, then dispatch insertion
 * by domain type. Errors unwind through remove_action_members; the
 * kfree/return lines are elided in this excerpt.
 */
1202 static struct mlx5dr_rule *
1203 dr_rule_create_rule(struct mlx5dr_matcher *matcher,
1204 struct mlx5dr_match_parameters *value,
1206 struct mlx5dr_action *actions[],
1209 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1210 struct mlx5dr_match_param param = {};
1211 struct mlx5dr_rule *rule;
1214 if (!dr_rule_verify(matcher, value, &param))
1217 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1221 rule->matcher = matcher;
1222 rule->flow_source = flow_source;
1223 INIT_LIST_HEAD(&rule->rule_actions_list);
1225 ret = dr_rule_add_action_members(rule, num_actions, actions);
1229 switch (dmn->type) {
1230 case MLX5DR_DOMAIN_TYPE_NIC_RX:
1231 rule->rx.nic_matcher = &matcher->rx;
1232 ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
1233 num_actions, actions);
1235 case MLX5DR_DOMAIN_TYPE_NIC_TX:
1236 rule->tx.nic_matcher = &matcher->tx;
1237 ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
1238 num_actions, actions);
1240 case MLX5DR_DOMAIN_TYPE_FDB:
1241 rule->rx.nic_matcher = &matcher->rx;
1242 rule->tx.nic_matcher = &matcher->tx;
1243 ret = dr_rule_create_rule_fdb(rule, &param,
1244 num_actions, actions);
1252 goto remove_action_members;
1256 remove_action_members:
1257 dr_rule_remove_action_members(rule);
1260 mlx5dr_err(dmn, "Failed creating rule\n");
/* Public entry point: pins the matcher for the rule's lifetime; the
 * reference is dropped again if creation fails.
 */
1264 struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
1265 struct mlx5dr_match_parameters *value,
1267 struct mlx5dr_action *actions[],
1270 struct mlx5dr_rule *rule;
1272 refcount_inc(&matcher->refcount);
1274 rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
1276 refcount_dec(&matcher->refcount);
/* Public entry point: destroy the rule and release the matcher
 * reference taken at creation.
 */
1281 int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
1283 struct mlx5dr_matcher *matcher = rule->matcher;
1286 ret = dr_rule_destroy_rule(rule);
1288 refcount_dec(&matcher->refcount);