1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
/* True when the prio-tag ingress FTE is needed for this vport: the device
 * reports prio_tag_required and the vport is a VF vport.
 */
10 esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
11 const struct mlx5_vport *vport)
13 return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
14 mlx5_eswitch_is_vf_vport(esw, vport->vport));
/* Install the prio-tag FTE on the vport ingress ACL: match packets without a
 * customer VLAN tag (untagged), push an 802.1Q priority tag (VID 0) and allow
 * them.  When the metadata modify-header rule already exists, its modify-header
 * action is chained in so source-port metadata is also written for these
 * packets.  Returns 0 on success or a negative errno.
 */
17 static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
18 struct mlx5_vport *vport)
20 struct mlx5_flow_act flow_act = {};
21 struct mlx5_flow_spec *spec;
24 /* For prio tag mode, there is only 1 FTE:
25 * 1) Untagged packets - push prio tag VLAN and modify metadata if
27 * Unmatched traffic is allowed by default
29 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
33 /* Untagged packets - push prio tag VLAN, allow */
34 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
35 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
36 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
37 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
38 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
/* Priority tag: 802.1Q header with VID 0 and PCP 0. */
39 flow_act.vlan[0].ethtype = ETH_P_8021Q;
40 flow_act.vlan[0].vid = 0;
41 flow_act.vlan[0].prio = 0;
/* Chain the source-port metadata modify-header when it was set up. */
43 if (vport->ingress.offloads.modify_metadata_rule) {
44 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
45 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
48 vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
50 if (IS_ERR(vport->ingress.allow_rule)) {
51 err = PTR_ERR(vport->ingress.allow_rule);
53 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
/* Clear the stale ERR_PTR so later cleanup does not try to free it. */
55 vport->ingress.allow_rule = NULL;
/* Allocate a modify-header that writes this vport's source-port metadata into
 * metadata register REG_C_0, then install a match-all rule on the ingress ACL
 * that applies it and allows the packet.  Returns 0 on success or a negative
 * errno; the modify-header is freed again if the rule cannot be added.
 */
62 static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
63 struct mlx5_vport *vport)
65 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
66 struct mlx5_flow_act flow_act = {};
/* The match value carries the source-port bits positioned at
 * ESW_SOURCE_PORT_METADATA_OFFSET; shift them down to the raw value —
 * the SET action's offset/length fields place it back in the register.
 */
70 key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
71 key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
73 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
74 MLX5_SET(set_action_in, action, field,
75 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
76 MLX5_SET(set_action_in, action, data, key);
77 MLX5_SET(set_action_in, action, offset,
78 ESW_SOURCE_PORT_METADATA_OFFSET);
79 MLX5_SET(set_action_in, action, length,
80 ESW_SOURCE_PORT_METADATA_BITS);
82 vport->ingress.offloads.modify_metadata =
83 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
85 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
86 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
88 "failed to alloc modify header for vport %d ingress acl (%d)\n",
/* NULL spec => match-all rule: metadata is set on every ingress packet. */
93 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
94 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
95 vport->ingress.offloads.modify_metadata_rule =
96 mlx5_add_flow_rules(vport->ingress.acl,
97 NULL, &flow_act, NULL, 0);
98 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
99 err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
101 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
/* Roll back the modify-header allocation on rule-add failure. */
103 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
104 vport->ingress.offloads.modify_metadata_rule = NULL;
/* Tear down the metadata rule and its modify-header; no-op when the rule was
 * never created.  The flow rule is deleted before the modify-header it
 * references is deallocated.
 */
109 static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
110 struct mlx5_vport *vport)
112 if (!vport->ingress.offloads.modify_metadata_rule)
115 mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
116 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
117 vport->ingress.offloads.modify_metadata_rule = NULL;
/* Create the ingress offload rules for a vport: the metadata-set rule when
 * vport match-metadata is enabled, then the prio-tag rule when required.
 * A prio-tag failure rolls back the metadata rule.  Returns 0 or a negative
 * errno.
 */
120 static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
121 struct mlx5_vport *vport)
125 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
126 err = esw_acl_ingress_mod_metadata_create(esw, vport);
129 "vport(%d) create ingress modify metadata, err(%d)\n",
135 if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
136 err = esw_acl_ingress_prio_tag_create(esw, vport);
139 "vport(%d) create ingress prio tag rule, err(%d)\n",
/* Error unwind: drop the metadata rule created above. */
148 esw_acl_ingress_mod_metadata_destroy(esw, vport);
/* Destroy all ingress offload rules: the untagged-allow (prio-tag) rule and
 * the metadata-set rule.  Both helpers are no-ops when the rule is absent.
 */
152 static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
153 struct mlx5_vport *vport)
155 esw_acl_ingress_allow_rule_destroy(vport);
156 esw_acl_ingress_mod_metadata_destroy(esw, vport);
/* Create the ingress ACL flow groups for a vport: an optional single-FTE
 * group matching on cvlan_tag for prio-tag mode, followed by a match-all
 * group holding the metadata-set FTE.  On failure of the second group the
 * first one (if created) is destroyed.  Returns 0 or a negative errno.
 */
159 static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
160 struct mlx5_vport *vport)
162 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
163 struct mlx5_flow_group *g;
164 void *match_criteria;
169 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
173 if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
174 /* This group is to hold FTE to match untagged packets when prio_tag
177 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
178 flow_group_in, match_criteria);
179 MLX5_SET(create_flow_group_in, flow_group_in,
180 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
181 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
/* Single-entry group: start and end index are the same. */
182 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
183 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
185 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
188 esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
192 vport->ingress.offloads.metadata_prio_tag_grp = g;
196 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
197 /* This group holds an FTE with no match to add metadata for
198 * tagged packets if prio-tag is enabled, or for all untagged
199 * traffic in case prio-tag is disabled.
/* Reuse the same buffer for the second group: clear it first. */
201 memset(flow_group_in, 0, inlen);
202 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
203 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
205 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
208 esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
212 vport->ingress.offloads.metadata_allmatch_grp = g;
215 kvfree(flow_group_in);
/* Error unwind: destroy the prio-tag group if it was created. */
219 if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
220 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
221 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
224 kvfree(flow_group_in);
/* Destroy the ingress ACL flow groups in reverse creation order (all-match
 * metadata group first, then the prio-tag group), NULLing each pointer so
 * the teardown is safe to repeat.
 */
228 static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
230 if (vport->ingress.offloads.metadata_allmatch_grp) {
231 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
232 vport->ingress.offloads.metadata_allmatch_grp = NULL;
235 if (vport->ingress.offloads.metadata_prio_tag_grp) {
236 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
237 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
/* Set up the ingress ACL for a vport.  Nothing to do unless match-metadata
 * or prio-tag mode is enabled.  Creates the ACL table, then its groups,
 * then its rules; on failure unwinds the groups and the table.  Returns 0
 * or a negative errno.
 */
241 int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
242 struct mlx5_vport *vport)
247 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
248 !esw_acl_ingress_prio_tag_enabled(esw, vport))
/* Remove any existing allow rule before (re)building the ACL. */
251 esw_acl_ingress_allow_rule_destroy(vport);
/* NOTE(review): the bodies of the next two checks are not visible in this
 * chunk — presumably they size the ACL table (FTE count); confirm against
 * the full source.
 */
253 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
255 if (esw_acl_ingress_prio_tag_enabled(esw, vport))
258 vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
259 MLX5_FLOW_NAMESPACE_ESW_INGRESS,
/* NOTE(review): on a NULL table PTR_ERR() yields 0, so err would be 0 on
 * that branch — verify esw_acl_table_create's NULL/ERR_PTR semantics.
 */
261 if (IS_ERR_OR_NULL(vport->ingress.acl)) {
262 err = PTR_ERR(vport->ingress.acl);
263 vport->ingress.acl = NULL;
267 err = esw_acl_ingress_ofld_groups_create(esw, vport);
272 "vport[%d] configure ingress rules\n", vport->vport);
274 err = esw_acl_ingress_ofld_rules_create(esw, vport);
/* Error unwind: groups first, then the ACL table itself. */
281 esw_acl_ingress_ofld_groups_destroy(vport);
283 esw_acl_ingress_table_destroy(vport);
287 void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
288 struct mlx5_vport *vport)
290 esw_acl_ingress_ofld_rules_destroy(esw, vport);
291 esw_acl_ingress_ofld_groups_destroy(vport);
292 esw_acl_ingress_table_destroy(vport);