1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
5 #include "prestera_acl.h"
6 #include "prestera_flow.h"
7 #include "prestera_flower.h"
/* Per-flow-block flower template state. Holds the reference to the ACL
 * ruleset taken in prestera_flower_tmplt_create(); that reference is
 * dropped by prestera_flower_template_cleanup().
 * (Struct definition is truncated in this view.)
 */
9 struct prestera_flower_template {
/* ruleset pinned for the lifetime of the template */
10 struct prestera_acl_ruleset *ruleset;
/* Drop the ruleset reference held by @block's flower template.
 * NOTE(review): the visible body dereferences block->tmplt without a
 * NULL check — presumably the lines not shown here guard and/or clear
 * block->tmplt; confirm against the full file.
 */
13 void prestera_flower_template_cleanup(struct prestera_flow_block *block)
16 /* put the reference to the ruleset kept in create */
17 prestera_acl_ruleset_put(block->tmplt->ruleset);
/* Translate the flower action list into rule->re_arg flags.
 * Only ACCEPT, DROP and TRAP are supported, and each may appear at most
 * once — the "valid" guards below catch duplicates (their error-return
 * lines are not visible in this view). Any other action is reported to
 * user space via @extack and also logged.
 * Returns 0 on success, negative errno on failure (presumably; return
 * statements are outside this view).
 */
24 static int prestera_flower_parse_actions(struct prestera_flow_block *block,
25 struct prestera_acl_rule *rule,
26 struct flow_action *flow_action,
27 struct netlink_ext_ack *extack)
29 const struct flow_action_entry *act;
/* an empty action list is legal: re_arg stays all-zero */
32 /* whole struct (rule->re_arg) must be initialized with 0 */
33 if (!flow_action_has_entries(flow_action))
36 flow_action_for_each(i, act, flow_action) {
38 case FLOW_ACTION_ACCEPT:
/* duplicate ACCEPT — rejected by the (hidden) error path */
39 if (rule->re_arg.accept.valid)
42 rule->re_arg.accept.valid = 1;
44 case FLOW_ACTION_DROP:
45 if (rule->re_arg.drop.valid)
48 rule->re_arg.drop.valid = 1;
50 case FLOW_ACTION_TRAP:
51 if (rule->re_arg.trap.valid)
54 rule->re_arg.trap.valid = 1;
/* default case: action the hardware cannot perform */
57 NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
58 pr_err("Unsupported action\n");
/* Translate FLOW_DISSECTOR_KEY_META (ingress ifindex) into the SYS_PORT
 * and SYS_DEV fields of the rule match key. Requires an exact-match
 * ifindex mask, and the ingress netdev must exist and belong to this
 * driver. Returns 0 on success, negative errno otherwise.
 * NOTE(review): the "key"/"mask" locals used at the bottom are declared
 * on lines not visible in this view; mask presumably is all-ones.
 */
66 static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
67 struct flow_cls_offload *f,
68 struct prestera_flow_block *block)
69 { struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
70 struct prestera_acl_match *r_match = &rule->re_key.match;
71 struct prestera_port *port;
72 struct net_device *ingress_dev;
73 struct flow_match_meta match;
76 flow_rule_match_meta(f_rule, &match);
/* partial ifindex masks cannot be expressed in hardware */
77 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
78 NL_SET_ERR_MSG_MOD(f->common.extack,
79 "Unsupported ingress ifindex mask");
/* resolve the ifindex within the block's namespace (RTNL held) */
83 ingress_dev = __dev_get_by_index(block->net,
84 match.key->ingress_ifindex);
86 NL_SET_ERR_MSG_MOD(f->common.extack,
87 "Can't find specified ingress port to match on");
/* only netdevs owned by this driver map to a hw port/dev id */
91 if (!prestera_netdev_check(ingress_dev)) {
92 NL_SET_ERR_MSG_MOD(f->common.extack,
93 "Can't match on switchdev ingress port");
96 port = netdev_priv(ingress_dev);
/* encode the hw port id as the SYS_PORT match field */
99 key = htons(port->hw_id);
100 rule_match_set(r_match->key, SYS_PORT, key);
101 rule_match_set(r_match->mask, SYS_PORT, mask);
/* encode the device id as the SYS_DEV match field */
104 key = htons(port->dev_id);
105 rule_match_set(r_match->key, SYS_DEV, key);
106 rule_match_set(r_match->mask, SYS_DEV, mask);
/* Translate a flower classifier (@f) into the prestera ACL rule match
 * key and action arguments. First rejects any dissector key outside
 * the supported set, then copies each present key (meta, control,
 * basic, eth addrs, ipv4, ports, vlan, icmp) into rule->re_key.match,
 * and finally hands the action list to prestera_flower_parse_actions().
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): locals err, addr_type and ip_proto are declared on
 * lines not visible in this view.
 */
112 static int prestera_flower_parse(struct prestera_flow_block *block,
113 struct prestera_acl_rule *rule,
114 struct flow_cls_offload *f)
115 { struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
116 struct flow_dissector *dissector = f_rule->match.dissector;
117 struct prestera_acl_match *r_match = &rule->re_key.match;
118 __be16 n_proto_mask = 0;
119 __be16 n_proto_key = 0;
/* reject rules that use any dissector key the hardware can't match */
124 if (dissector->used_keys &
125 ~(BIT(FLOW_DISSECTOR_KEY_META) |
126 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
127 BIT(FLOW_DISSECTOR_KEY_BASIC) |
128 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
129 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
130 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
131 BIT(FLOW_DISSECTOR_KEY_ICMP) |
132 BIT(FLOW_DISSECTOR_KEY_PORTS) |
133 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
134 NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
/* tc priority orders rules within the ruleset */
138 prestera_acl_rule_priority_set(rule, f->common.prio);
/* META: ingress port match, delegated to the helper above */
140 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
141 err = prestera_flower_parse_meta(rule, f, block);
/* CONTROL: only records addr_type to pick v4/v6 handling below */
146 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
147 struct flow_match_control match;
149 flow_rule_match_control(f_rule, &match);
150 addr_type = match.key->addr_type;
/* BASIC: ethertype and IP protocol */
153 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
154 struct flow_match_basic match;
156 flow_rule_match_basic(f_rule, &match);
157 n_proto_key = match.key->n_proto;
158 n_proto_mask = match.mask->n_proto;
/* ETH_P_ALL means "any ethertype" — presumably the hidden
 * lines zero n_proto_key/mask so no ETH_TYPE match is set;
 * confirm against the full file.
 */
160 if (ntohs(match.key->n_proto) == ETH_P_ALL) {
165 rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
166 rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
168 rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
169 rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
/* remembered for the L4 ports sanity check below */
170 ip_proto = match.key->ip_proto;
/* ETH_ADDRS: MACs are split into 4-byte + 2-byte hw fields */
173 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
174 struct flow_match_eth_addrs match;
176 flow_rule_match_eth_addrs(f_rule, &match);
179 rule_match_set_n(r_match->key,
180 ETH_DMAC_0, &match.key->dst[0], 4);
181 rule_match_set_n(r_match->key,
182 ETH_DMAC_1, &match.key->dst[4], 2);
184 rule_match_set_n(r_match->mask,
185 ETH_DMAC_0, &match.mask->dst[0], 4);
186 rule_match_set_n(r_match->mask,
187 ETH_DMAC_1, &match.mask->dst[4], 2);
190 rule_match_set_n(r_match->key,
191 ETH_SMAC_0, &match.key->src[0], 4);
192 rule_match_set_n(r_match->key,
193 ETH_SMAC_1, &match.key->src[4], 2);
195 rule_match_set_n(r_match->mask,
196 ETH_SMAC_0, &match.mask->src[0], 4);
197 rule_match_set_n(r_match->mask,
198 ETH_SMAC_1, &match.mask->src[4], 2);
/* IPv4 src/dst (selected via addr_type from the CONTROL key) */
201 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
202 struct flow_match_ipv4_addrs match;
204 flow_rule_match_ipv4_addrs(f_rule, &match);
206 rule_match_set(r_match->key, IP_SRC, match.key->src);
207 rule_match_set(r_match->mask, IP_SRC, match.mask->src);
209 rule_match_set(r_match->key, IP_DST, match.key->dst);
210 rule_match_set(r_match->mask, IP_DST, match.mask->dst);
/* PORTS: L4 ports only make sense for TCP/UDP */
213 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
214 struct flow_match_ports match;
216 if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
219 "Only UDP and TCP keys are supported");
223 flow_rule_match_ports(f_rule, &match);
225 rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
226 rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
228 rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
229 rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
/* VLAN: id (only when masked) and TPID */
232 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
233 struct flow_match_vlan match;
235 flow_rule_match_vlan(f_rule, &match);
237 if (match.mask->vlan_id != 0) {
/* vlan_id is host-order u16 in flow_match; hw wants BE */
238 __be16 key = cpu_to_be16(match.key->vlan_id);
239 __be16 mask = cpu_to_be16(match.mask->vlan_id);
241 rule_match_set(r_match->key, VLAN_ID, key);
242 rule_match_set(r_match->mask, VLAN_ID, mask);
245 rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
246 rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
/* ICMP type/code */
249 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
250 struct flow_match_icmp match;
252 flow_rule_match_icmp(f_rule, &match);
254 rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
255 rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
257 rule_match_set(r_match->key, ICMP_CODE, match.key->code);
258 rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
/* match key done — now translate the action list */
261 return prestera_flower_parse_actions(block, rule, &f->rule->action,
/* Install (or replace) a flower rule in hardware.
 * Flow: take a ruleset reference, create the rule (which takes its own
 * ruleset reference), parse the classifier, offload the ruleset if it
 * is not offloaded yet, then add the rule to hardware. On success the
 * local ruleset reference is dropped; the rule keeps its own.
 * Returns 0 on success, negative errno otherwise.
 */
265 int prestera_flower_replace(struct prestera_flow_block *block,
266 struct flow_cls_offload *f)
268 struct prestera_acl_ruleset *ruleset;
269 struct prestera_acl *acl = block->sw->acl;
270 struct prestera_acl_rule *rule;
/* get-or-create the ruleset for this block; takes a reference */
273 ruleset = prestera_acl_ruleset_get(acl, block);
275 return PTR_ERR(ruleset);
277 /* increments the ruleset reference */
278 rule = prestera_acl_rule_create(ruleset, f->cookie);
281 goto err_rule_create;
284 err = prestera_flower_parse(block, rule, f);
/* first rule in a ruleset triggers the hardware offload */
288 if (!prestera_acl_ruleset_is_offload(ruleset)) {
289 err = prestera_acl_ruleset_offload(ruleset);
291 goto err_ruleset_offload;
294 err = prestera_acl_rule_add(block->sw, rule);
/* success: drop only the local reference taken above */
298 prestera_acl_ruleset_put(ruleset);
/* error unwind: destroy the rule, then drop the local reference */
303 prestera_acl_rule_destroy(rule);
305 prestera_acl_ruleset_put(ruleset);
/* Remove a flower rule from hardware, looked up by its tc cookie.
 * Silently does nothing if the ruleset or rule no longer exists
 * (the guarding checks are on lines not visible in this view).
 */
309 void prestera_flower_destroy(struct prestera_flow_block *block,
310 struct flow_cls_offload *f)
312 struct prestera_acl_ruleset *ruleset;
313 struct prestera_acl_rule *rule;
315 ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
319 rule = prestera_acl_rule_lookup(ruleset, f->cookie);
321 prestera_acl_rule_del(block->sw, rule);
322 prestera_acl_rule_destroy(rule);
/* drop the reference taken by the rule at create time */
324 prestera_acl_ruleset_put(ruleset);
/* Handle a tc chain-template create: parse the template classifier into
 * a throwaway on-stack rule to derive the match keymask, pin it on the
 * block's ruleset, and keep a ruleset reference in block->tmplt until
 * prestera_flower_template_cleanup() runs.
 * Returns 0 on success, negative errno otherwise.
 */
328 int prestera_flower_tmplt_create(struct prestera_flow_block *block,
329 struct flow_cls_offload *f)
331 struct prestera_flower_template *template;
332 struct prestera_acl_ruleset *ruleset;
333 struct prestera_acl_rule rule;
/* on-stack rule used only to compute the template keymask */
336 memset(&rule, 0, sizeof(rule));
337 err = prestera_flower_parse(block, &rule, f);
341 template = kmalloc(sizeof(*template), GFP_KERNEL);
347 prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
/* takes a ruleset reference, kept below in template->ruleset */
348 ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
349 if (IS_ERR_OR_NULL(ruleset)) {
351 goto err_ruleset_get;
354 /* preserve keymask/template to this ruleset */
355 prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
357 /* skip error, as it is not possible to reject template operation,
358 * so, keep the reference to the ruleset for rules to be added
359 * to that ruleset later. In case of offload fail, the ruleset
360 * will be offloaded again during adding a new rule. Also, it is
361 * unlikely that the ruleset is already offloaded at this stage.
363 prestera_acl_ruleset_offload(ruleset);
365 /* keep the reference to the ruleset */
366 template->ruleset = ruleset;
367 block->tmplt = template;
/* error unwind (labels not visible in this view) */
373 NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
/* Handle a tc chain-template destroy: just release the template state
 * (the ruleset reference) via the common cleanup helper. @f is unused
 * in the visible body.
 */
377 void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
378 struct flow_cls_offload *f)
380 prestera_flower_template_cleanup(block);
/* Report hardware counters for a flower rule back to tc.
 * Looks the rule up by cookie, reads its packet/byte counters and
 * feeds them to flow_stats_update() as delayed hw stats.
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): locals err, packets, bytes and lastuse are declared on
 * lines not visible in this view.
 */
383 int prestera_flower_stats(struct prestera_flow_block *block,
384 struct flow_cls_offload *f)
386 struct prestera_acl_ruleset *ruleset;
387 struct prestera_acl_rule *rule;
393 ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
395 return PTR_ERR(ruleset);
397 rule = prestera_acl_rule_lookup(ruleset, f->cookie);
400 goto err_rule_get_stats;
403 err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
406 goto err_rule_get_stats;
/* drivers report deltas; 0 drops, DELAYED = polled hw counters */
408 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
409 FLOW_ACTION_HW_STATS_DELAYED);
/* drop the reference taken by ruleset_lookup (shared exit path) */
412 prestera_acl_ruleset_put(ruleset);