1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
/* Tear down the legacy ingress ACL *rules* (not the groups or the table)
 * installed on @vport: the drop rule, if one was created, and the shared
 * "allow" rule via the common helper. Idempotent: the drop-rule pointer is
 * NULL-ed after deletion, so calling this again is a no-op for that rule.
 * NOTE(review): some lines of this function are elided in this view.
 */
9 static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
11 if (vport->ingress.legacy.drop_rule) {
12 mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
/* Clear so a subsequent destroy/setup pass does not double-free. */
13 vport->ingress.legacy.drop_rule = NULL;
/* Shared helper removes vport->ingress.allow_rule. */
15 esw_acl_ingress_allow_rule_destroy(vport);
/* Create the four flow groups of the legacy ingress ACL table, one FTE slot
 * each (flow indices 0..3), in priority order:
 *   0: untagged + smac match  (allow untagged traffic with the vport MAC)
 *   1: untagged only          (allow any untagged traffic)
 *   2: smac match only        (allow any traffic from the vport MAC)
 *   3: no match criteria      (catch-all drop group)
 * On any group-creation failure the groups created so far are destroyed
 * and their pointers reset (unwind section at the bottom).
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): allocation check, err assignments, gotos and labels are
 * elided in this view — behavior of the unwind path is inferred, confirm
 * against the full file.
 */
18 static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
19 struct mlx5_vport *vport)
21 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
22 struct mlx5_core_dev *dev = esw->dev;
23 struct mlx5_flow_group *g;
/* kvzalloc: zeroed input, so only the fields set below are meaningful. */
28 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
32 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
/* Group 0: match cvlan_tag + full smac (untagged + spoofchk). */
34 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
35 MLX5_MATCH_OUTER_HEADERS);
36 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
37 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
38 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
39 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
40 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
42 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
45 esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
49 vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
/* Group 1: match cvlan_tag only (untagged-only allow).
 * memset re-zeroes the input; match_criteria still points into it. */
51 memset(flow_group_in, 0, inlen);
52 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
53 MLX5_MATCH_OUTER_HEADERS);
54 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
55 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
56 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
58 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
61 esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
65 vport->ingress.legacy.allow_untagged_only_grp = g;
/* Group 2: match smac only (spoofchk-only allow). */
67 memset(flow_group_in, 0, inlen);
68 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
69 MLX5_MATCH_OUTER_HEADERS);
70 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
71 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
72 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
73 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
75 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
78 esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
82 vport->ingress.legacy.allow_spoofchk_only_grp = g;
/* Group 3: no match criteria — catch-all slot for the drop rule. */
84 memset(flow_group_in, 0, inlen);
85 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
86 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
88 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
91 esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
95 vport->ingress.legacy.drop_grp = g;
/* Success path: free the reusable input buffer and return. */
96 kvfree(flow_group_in);
/* ---- error unwind: destroy whatever groups were created, in reverse
 * order, resetting pointers so later teardown passes skip them.
 * IS_ERR_OR_NULL guards because a pointer may hold an ERR_PTR from the
 * failing mlx5_create_flow_group call. ---- */
100 if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
101 mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
102 vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
105 if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
106 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
107 vport->ingress.legacy.allow_untagged_only_grp = NULL;
110 if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
111 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
112 vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
115 kvfree(flow_group_in);
/* Destroy the four legacy ingress flow groups (reverse creation order) and
 * NULL the stored pointers so repeated teardown is safe. Unlike the unwind
 * path in the create function, plain NULL checks suffice here: on the
 * success path these fields hold either a valid group or NULL, never an
 * ERR_PTR. Rules must already be gone (groups cannot be destroyed while
 * rules reference them).
 * NOTE(review): closing braces are elided in this view.
 */
119 static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
121 if (vport->ingress.legacy.allow_spoofchk_only_grp) {
122 mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
123 vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
125 if (vport->ingress.legacy.allow_untagged_only_grp) {
126 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
127 vport->ingress.legacy.allow_untagged_only_grp = NULL;
129 if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
130 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
131 vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
133 if (vport->ingress.legacy.drop_grp) {
134 mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
135 vport->ingress.legacy.drop_grp = NULL;
/* (Re)configure the legacy-mode ingress ACL for @vport according to its
 * current vlan/qos/spoofchk settings:
 *   - tear down any previously installed rules,
 *   - optionally create a drop-flow counter (if the device supports it),
 *   - if no vlan/qos/spoofchk is configured, remove the whole ACL and
 *     return early,
 *   - otherwise ensure the ACL table and its 4 groups exist, then install
 *     one "allow" rule (match built from vlan/spoofchk settings) and one
 *     catch-all "drop" rule (with counter attached when available).
 * Returns 0 on success, negative errno on failure (full cleanup is done
 * on the error path).
 * NOTE(review): several lines (err declaration, NULL/alloc checks, gotos,
 * labels, returns) are elided in this view — the described error flow is
 * inferred, confirm against the full file.
 */
139 int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
140 struct mlx5_vport *vport)
142 struct mlx5_flow_destination drop_ctr_dst = {};
143 struct mlx5_flow_destination *dst = NULL;
144 struct mlx5_flow_act flow_act = {};
145 struct mlx5_flow_spec *spec = NULL;
146 struct mlx5_fc *counter = NULL;
147 /* The ingress acl table contains 4 groups
148 * (2 active rules at the same time -
149 * 1 allow rule from one of the first 3 groups.
150 * 1 drop rule from the last group):
151 * 1)Allow untagged traffic with smac=original mac.
152 * 2)Allow untagged traffic.
153 * 3)Allow traffic with smac=original mac.
154 * 4)Drop all other traffic.
/* Start from a clean slate: drop any rules left from a prior setup. */
161 esw_acl_ingress_lgcy_rules_destroy(vport);
/* Drop counter is best-effort: a failure is only warned about, setup
 * proceeds without statistics. */
163 if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
164 counter = mlx5_fc_create(esw->dev, false);
167 "vport[%d] configure ingress drop rule counter failed\n",
169 vport->ingress.legacy.drop_counter = counter;
/* Nothing to enforce — remove the ACL entirely and return. */
172 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
173 esw_acl_ingress_lgcy_cleanup(esw, vport);
/* Lazily create the ACL table and its groups on first use. */
177 if (!vport->ingress.acl) {
178 vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
179 MLX5_FLOW_NAMESPACE_ESW_INGRESS,
181 if (IS_ERR_OR_NULL(vport->ingress.acl)) {
182 err = PTR_ERR(vport->ingress.acl);
/* Reset so cleanup paths do not act on an ERR_PTR. */
183 vport->ingress.acl = NULL;
187 err = esw_acl_ingress_lgcy_groups_create(esw, vport);
193 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
194 vport->vport, vport->info.vlan, vport->info.qos);
196 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
/* Build match criteria for the allow rule: require untagged when a
 * vlan/qos is pushed, require the vport MAC when spoofchk is on. */
202 if (vport->info.vlan || vport->info.qos)
203 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
204 outer_headers.cvlan_tag);
206 if (vport->info.spoofchk) {
207 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
208 outer_headers.smac_47_16);
209 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
210 outer_headers.smac_15_0);
211 smac_v = MLX5_ADDR_OF(fte_match_param,
213 outer_headers.smac_47_16);
214 ether_addr_copy(smac_v, vport->info.mac);
217 /* Create ingress allow rule */
/* NOTE(review): spec is memset to 0 here, which appears to discard the
 * match criteria/values built above before match_criteria_enable is set
 * — confirm intended behavior against the full upstream file. */
218 memset(spec, 0, sizeof(*spec));
219 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
220 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
221 vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
223 if (IS_ERR(vport->ingress.allow_rule)) {
224 err = PTR_ERR(vport->ingress.allow_rule);
226 "vport[%d] configure ingress allow rule, err(%d)\n",
228 vport->ingress.allow_rule = NULL;
/* Catch-all drop rule: NULL spec matches everything not allowed above. */
232 memset(&flow_act, 0, sizeof(flow_act));
233 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
234 /* Attach drop flow counter */
236 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
237 drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
238 drop_ctr_dst.counter_id = mlx5_fc_id(counter);
242 vport->ingress.legacy.drop_rule =
243 mlx5_add_flow_rules(vport->ingress.acl, NULL,
244 &flow_act, dst, dest_num);
245 if (IS_ERR(vport->ingress.legacy.drop_rule)) {
246 err = PTR_ERR(vport->ingress.legacy.drop_rule);
248 "vport[%d] configure ingress drop rule, err(%d)\n",
250 vport->ingress.legacy.drop_rule = NULL;
/* Error path: full teardown (rules, groups, table, counter). */
257 esw_acl_ingress_lgcy_cleanup(esw, vport);
/* Full teardown of the legacy ingress ACL on @vport: rules, then groups,
 * then the table itself, and finally the drop counter. Safe to call when
 * the ACL was never created (or creation failed): an absent/ERR table
 * skips straight to counter cleanup, and the counter check tolerates both
 * NULL and ERR_PTR values left by a failed mlx5_fc_create.
 * NOTE(review): the clean_drop_counter label line and closing braces are
 * elided in this view.
 */
262 void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
263 struct mlx5_vport *vport)
265 if (IS_ERR_OR_NULL(vport->ingress.acl))
266 goto clean_drop_counter;
268 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
/* Order matters: rules before groups before table. */
270 esw_acl_ingress_lgcy_rules_destroy(vport);
271 esw_acl_ingress_lgcy_groups_destroy(vport);
272 esw_acl_ingress_table_destroy(vport);
275 if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) {
276 mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
277 vport->ingress.legacy.drop_counter = NULL;