1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <net/flow_offload.h>
6 #include <net/netfilter/nf_tables.h>
7 #include <net/netfilter/nf_tables_offload.h>
8 #include <net/pkt_cls.h>
/* Allocate a nft_flow_rule and an embedded flow_rule with room for
 * @num_actions action entries, then point the flow_rule's match triple
 * (dissector/mask/key) at the nft_flow_match embedded in the wrapper.
 * NOTE(review): the NULL checks for kzalloc()/flow_rule_alloc() and the
 * return statement are elided in this sampled view — confirm against the
 * full file before editing.
 */
10 static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
12 struct nft_flow_rule *flow;
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
18 flow->rule = flow_rule_alloc(num_actions);
/* Match data lives inside @flow itself; the flow_rule only references it. */
24 flow->rule->match.dissector = &flow->match.dissector;
25 flow->rule->match.mask = &flow->match.mask;
26 flow->rule->match.key = &flow->match.key;
/* Record the L3 address-type key (IPv4/IPv6 control key) in @flow's match.
 * Sets key/mask for FLOW_DISSECTOR_KEY_CONTROL and registers the key's
 * offset with the dissector.  The guard below makes this a no-op when the
 * control key was already populated (first caller wins).
 */
31 void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
32 enum flow_dissector_key_id addr_type)
34 struct nft_flow_match *match = &flow->match;
35 struct nft_flow_key *mask = &match->mask;
36 struct nft_flow_key *key = &match->key;
/* Already set — presumably returns early here (body elided in this view). */
38 if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
41 key->control.addr_type = addr_type;
42 mask->control.addr_type = 0xffff;
43 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
44 match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
45 offsetof(struct nft_flow_key, control);
/* Scratch holder for an ethertype value/mask pair, used by
 * nft_flow_rule_transfer_vlan() while rotating the n_proto and VLAN TPID
 * fields.  NOTE(review): member declarations are elided in this view —
 * presumably __be16 value/mask; confirm against the full file.
 */
48 struct nft_offload_ethertype {
/* Rearrange the match so the flow dissector sees VLAN tags the way drivers
 * expect: the ethertype that the nft expressions stored in basic.n_proto is
 * really the innermost protocol when VLAN headers are matched, so rotate it
 * through the vlan/cvlan TPID slots.  Two cases are handled below:
 * single-tagged (VLAN key only) and double-tagged (VLAN + inner TPID is
 * itself 802.1Q/802.1AD, promoting the cvlan key).
 * NOTE(review): the early-return body for the non-VLAN guard and the
 * "} else if (...)" glue lines are elided in this sampled view.
 */
53 static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
54 struct nft_flow_rule *flow)
56 struct nft_flow_match *match = &flow->match;
57 struct nft_offload_ethertype ethertype;
/* Nothing to transfer when n_proto is not a VLAN ethertype. */
59 if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
60 match->key.basic.n_proto != htons(ETH_P_8021Q) &&
61 match->key.basic.n_proto != htons(ETH_P_8021AD))
/* Save the ethertype currently sitting in basic.n_proto. */
64 ethertype.value = match->key.basic.n_proto;
65 ethertype.mask = match->mask.basic.n_proto;
/* Double-tagged: shift n_proto <- cvlan_tpid <- vlan_tpid <- saved value. */
67 if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
68 (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
69 match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
70 match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
71 match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
72 match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
73 match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
74 match->key.vlan.vlan_tpid = ethertype.value;
75 match->mask.vlan.vlan_tpid = ethertype.mask;
76 match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
77 offsetof(struct nft_flow_key, cvlan);
78 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
/* Single-tagged branch (its "else if" header is elided in this view):
 * shift n_proto <- vlan_tpid <- saved value and register the VLAN key.
 */
80 match->key.basic.n_proto = match->key.vlan.vlan_tpid;
81 match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
82 match->key.vlan.vlan_tpid = ethertype.value;
83 match->mask.vlan.vlan_tpid = ethertype.mask;
84 match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
85 offsetof(struct nft_flow_key, vlan);
86 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
/* Translate an nftables @rule into a nft_flow_rule suitable for hardware
 * offload.  Two passes over the expression list: first count action-type
 * expressions (NFT_OFFLOAD_F_ACTION) to size the flow_rule, then run each
 * expression's ->offload() callback to populate match and actions.
 * Finally the VLAN keys are rotated and the L3 protocol recorded from the
 * dependency context.  Returns ERR_PTR(-EOPNOTSUPP) when no offloadable
 * action exists, ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): several error-path lines (num_actions++ body, ctx free,
 * err handling after ->offload(), final return) are elided in this view.
 */
90 struct nft_flow_rule *nft_flow_rule_create(struct net *net,
91 const struct nft_rule *rule)
93 struct nft_offload_ctx *ctx;
94 struct nft_flow_rule *flow;
95 int num_actions = 0, err;
96 struct nft_expr *expr;
/* Pass 1: count expressions that emit flow actions. */
98 expr = nft_expr_first(rule);
99 while (nft_expr_more(rule, expr)) {
100 if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
103 expr = nft_expr_next(expr);
106 if (num_actions == 0)
107 return ERR_PTR(-EOPNOTSUPP);
109 flow = nft_flow_rule_alloc(num_actions);
111 return ERR_PTR(-ENOMEM);
/* Pass 2: let each expression fill in its part of the offload rule. */
113 expr = nft_expr_first(rule);
115 ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
121 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
123 while (nft_expr_more(rule, expr)) {
/* Expressions without an ->offload() op cannot be offloaded. */
124 if (!expr->ops->offload) {
128 err = expr->ops->offload(ctx, flow, expr);
132 expr = nft_expr_next(expr);
134 nft_flow_rule_transfer_vlan(ctx, flow);
/* L3 protocol collected by nft_offload_update_dependency() calls. */
136 flow->proto = ctx->dep.l3num;
/* Error unwind label region (elided): free the partially built rule. */
142 nft_flow_rule_destroy(flow);
/* Release a nft_flow_rule built by nft_flow_rule_create().  Walks the
 * action entries to release per-action resources first; REDIRECT/MIRRED
 * are special-cased — presumably to drop the held net_device reference
 * (switch body and the final kfree()s are elided in this view — confirm).
 */
147 void nft_flow_rule_destroy(struct nft_flow_rule *flow)
149 struct flow_action_entry *entry;
152 flow_action_for_each(i, entry, &flow->rule->action) {
154 case FLOW_ACTION_REDIRECT:
155 case FLOW_ACTION_MIRRED:
/* Announce which dependency class (network/transport) the next
 * nft_offload_update_dependency() call will fill in.
 */
166 void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
167 enum nft_offload_dep_type type)
169 ctx->dep.type = type;
/* Store protocol dependency data captured from an expression: a __be16
 * L3 protocol number for NETWORK deps, a u8 L4 protocol for TRANSPORT
 * deps (length mismatches only WARN, the copy proceeds regardless).
 * The pending type is reset to UNSPEC afterwards so stale state cannot
 * leak into the next expression.
 * NOTE(review): break statements / default case are elided in this view.
 */
172 void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
173 const void *data, u32 len)
175 switch (ctx->dep.type) {
176 case NFT_OFFLOAD_DEP_NETWORK:
177 WARN_ON(len != sizeof(__u16));
178 memcpy(&ctx->dep.l3num, data, sizeof(__u16));
180 case NFT_OFFLOAD_DEP_TRANSPORT:
181 WARN_ON(len != sizeof(__u8));
182 memcpy(&ctx->dep.protonum, data, sizeof(__u8));
/* Consume the dependency announcement. */
187 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
/* Fill the common header shared by all flower classifier offload requests:
 * match protocol, tc priority slot and the extack for driver error strings.
 */
190 static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
191 __be16 proto, int priority,
192 struct netlink_ext_ack *extack)
194 common->protocol = proto;
195 common->prio = priority;
196 common->extack = extack;
/* Invoke every flow block callback registered on @cb_list with the given
 * setup @type and payload.  NOTE(review): the error check after each cb
 * and the final return are elided in this view — presumably the first
 * negative return aborts the walk; confirm against the full file.
 */
199 static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
200 struct list_head *cb_list)
202 struct flow_block_cb *block_cb;
205 list_for_each_entry(block_cb, cb_list, list) {
206 err = block_cb->cb(type, type_data, block_cb->cb_priv);
/* Validate that the base chain's priority fits the u16 tc priority field:
 * it must be positive and no larger than USHRT_MAX.  Return-value lines
 * are elided in this view (presumably -EINVAL on violation, 0 otherwise).
 */
213 int nft_chain_offload_priority(struct nft_base_chain *basechain)
215 if (basechain->ops.priority <= 0 ||
216 basechain->ops.priority > USHRT_MAX)
/* Prepare a flow_cls_offload request for @rule on @basechain.  The rule
 * pointer doubles as the driver-visible cookie so add/destroy/stats calls
 * for the same rule correlate.  @flow may be NULL (destroy/stats paths);
 * the lines between the memset and the common init are elided here —
 * presumably they derive @proto from flow->proto when @flow is given.
 */
222 static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
223 const struct nft_base_chain *basechain,
224 const struct nft_rule *rule,
225 const struct nft_flow_rule *flow,
226 struct netlink_ext_ack *extack,
227 enum flow_cls_command command)
/* Default: match all protocols unless a flow rule narrows it. */
229 __be16 proto = ETH_P_ALL;
231 memset(cls_flow, 0, sizeof(*cls_flow));
236 nft_flow_offload_common_init(&cls_flow->common, proto,
237 basechain->ops.priority, extack);
238 cls_flow->command = command;
/* The nft_rule address is the stable cookie identifying this rule. */
239 cls_flow->cookie = (unsigned long) rule;
241 cls_flow->rule = flow->rule;
/* Build and dispatch a flower offload @command for @rule to every driver
 * callback bound to the chain's flow block.  Only base chains can own a
 * flow block; the non-base-chain early return body is elided in this view
 * (presumably -EOPNOTSUPP).
 */
244 static int nft_flow_offload_cmd(const struct nft_chain *chain,
245 const struct nft_rule *rule,
246 struct nft_flow_rule *flow,
247 enum flow_cls_command command,
248 struct flow_cls_offload *cls_flow)
250 struct netlink_ext_ack extack = {};
251 struct nft_base_chain *basechain;
253 if (!nft_is_base_chain(chain))
256 basechain = nft_base_chain(chain);
257 nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
260 return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
261 &basechain->flow_block.cb_list);
/* Thin wrapper around nft_flow_offload_cmd() that supplies a stack
 * flow_cls_offload for callers that do not need the reply (e.g. the
 * stats path reads it; add/destroy paths do not).
 */
264 static int nft_flow_offload_rule(const struct nft_chain *chain,
265 struct nft_rule *rule,
266 struct nft_flow_rule *flow,
267 enum flow_cls_command command)
269 struct flow_cls_offload cls_flow;
271 return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
/* Query hardware counters for @rule: issue FLOW_CLS_STATS to the bound
 * drivers, then hand the returned cls_flow.stats to each expression that
 * implements ->offload_stats() (e.g. counters) so it can fold the hw
 * numbers into its software state.  Error-check lines are elided here.
 */
274 int nft_flow_rule_stats(const struct nft_chain *chain,
275 const struct nft_rule *rule)
277 struct flow_cls_offload cls_flow = {};
278 struct nft_expr *expr, *next;
281 err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
286 nft_rule_for_each_expr(expr, next, rule) {
287 if (expr->ops->offload_stats)
288 expr->ops->offload_stats(expr, &cls_flow.stats);
/* Adopt the driver callbacks collected in @bo into the base chain's flow
 * block, so future rule offload requests reach them.
 */
294 static int nft_flow_offload_bind(struct flow_block_offload *bo,
295 struct nft_base_chain *basechain)
297 list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
/* Tear down a chain's hardware state on unbind: first send FLOW_CLS_DESTROY
 * for every rule in the chain so drivers drop their copies, then release
 * all flow block callbacks gathered in @bo.
 */
301 static int nft_flow_offload_unbind(struct flow_block_offload *bo,
302 struct nft_base_chain *basechain)
304 struct flow_block_cb *block_cb, *next;
305 struct flow_cls_offload cls_flow;
306 struct netlink_ext_ack extack;
307 struct nft_chain *chain;
308 struct nft_rule *rule;
310 chain = &basechain->chain;
/* Ask drivers to forget each offloaded rule before the cbs go away. */
311 list_for_each_entry(rule, &chain->rules, list) {
312 memset(&extack, 0, sizeof(extack));
313 nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
314 &extack, FLOW_CLS_DESTROY);
315 nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
/* Now free the callbacks themselves; _safe since we delete while walking. */
318 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
319 list_del(&block_cb->list);
320 flow_block_cb_free(block_cb);
/* Apply the result of a flow block negotiation: splice callbacks in on
 * BIND, destroy rules and free callbacks on UNBIND.  Switch scaffolding
 * (break/default/return) is elided in this sampled view.
 */
326 static int nft_block_setup(struct nft_base_chain *basechain,
327 struct flow_block_offload *bo,
328 enum flow_block_command cmd)
333 case FLOW_BLOCK_BIND:
334 err = nft_flow_offload_bind(bo, basechain);
336 case FLOW_BLOCK_UNBIND:
337 err = nft_flow_offload_unbind(bo, basechain);
/* Initialise a flow_block_offload request for @basechain: zeroed, tied to
 * the chain's flow block, ingress binder type, empty callback list.
 * NOTE(review): assignments for bo->net, bo->command and bo->extack are
 * elided in this view (a net parameter appears at the elided line 348).
 */
347 static void nft_flow_block_offload_init(struct flow_block_offload *bo,
349 enum flow_block_command cmd,
350 struct nft_base_chain *basechain,
351 struct netlink_ext_ack *extack)
353 memset(bo, 0, sizeof(*bo));
355 bo->block = &basechain->flow_block;
357 bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
359 INIT_LIST_HEAD(&bo->cb_list);
/* Direct offload path for devices that implement ->ndo_setup_tc(): ask the
 * driver to populate the flow block, then commit the negotiated callbacks
 * via nft_block_setup().  The error check between the two calls is elided
 * in this view.
 */
362 static int nft_block_offload_cmd(struct nft_base_chain *chain,
363 struct net_device *dev,
364 enum flow_block_command cmd)
366 struct netlink_ext_ack extack = {};
367 struct flow_block_offload bo;
370 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
372 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
376 return nft_block_setup(chain, &bo, cmd);
/* Cleanup callback invoked by the flow_indr core when an indirect block
 * driver goes away: rebuild an UNBIND request, detach this callback from
 * the driver list, move it onto the request's cb_list and run the normal
 * unbind path — all under the nftables commit mutex to serialise against
 * concurrent transactions.
 */
379 static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
381 struct nft_base_chain *basechain = block_cb->indr.data;
382 struct net_device *dev = block_cb->indr.dev;
383 struct netlink_ext_ack extack = {};
384 struct nftables_pernet *nft_net;
385 struct net *net = dev_net(dev);
386 struct flow_block_offload bo;
388 nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
390 nft_net = nft_pernet(net);
391 mutex_lock(&nft_net->commit_mutex);
392 list_del(&block_cb->driver_list);
393 list_move(&block_cb->list, &bo.cb_list);
394 nft_flow_offload_unbind(&bo, basechain);
395 mutex_unlock(&nft_net->commit_mutex);
/* Indirect offload path (tunnel devices etc. with no ->ndo_setup_tc()):
 * let registered indirect-block drivers fill in callbacks, registering
 * nft_indr_block_cleanup() for driver-initiated teardown.  If no driver
 * answered (cb_list empty) the return-value line is elided here —
 * presumably -EOPNOTSUPP; otherwise commit via nft_block_setup().
 */
398 static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
399 struct net_device *dev,
400 enum flow_block_command cmd)
402 struct netlink_ext_ack extack = {};
403 struct flow_block_offload bo;
406 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
408 err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
409 nft_indr_block_cleanup);
413 if (list_empty(&bo.cb_list))
416 return nft_block_setup(basechain, &bo, cmd);
/* Route a bind/unbind command per device capability: drivers exposing
 * ->ndo_setup_tc() take the direct path, everything else goes through the
 * indirect flow block infrastructure.
 */
419 static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
420 struct net_device *dev,
421 enum flow_block_command cmd)
425 if (dev->netdev_ops->ndo_setup_tc)
426 err = nft_block_offload_cmd(basechain, dev, cmd);
428 err = nft_indr_block_offload_cmd(basechain, dev, cmd);
/* Issue @cmd for every device the base chain hooks (or only @this_dev when
 * non-NULL).  If a BIND fails partway, the second loop rolls back by
 * UNBINDing the devices that were already bound.  Several glue lines
 * (dev = hook->ops.dev assignments, rollback loop boundary test) are
 * elided in this sampled view.
 */
433 static int nft_flow_block_chain(struct nft_base_chain *basechain,
434 const struct net_device *this_dev,
435 enum flow_block_command cmd)
437 struct net_device *dev;
438 struct nft_hook *hook;
441 list_for_each_entry(hook, &basechain->hook_list, list) {
443 if (this_dev && this_dev != dev)
446 err = nft_chain_offload_cmd(basechain, dev, cmd);
447 if (err < 0 && cmd == FLOW_BLOCK_BIND) {
/* Rollback: unbind the hooks bound before the failure. */
459 list_for_each_entry(hook, &basechain->hook_list, list) {
464 nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
/* Bind or unbind hardware offload for a whole chain.  Only base chains are
 * eligible (early-return body elided).  @ppolicy optionally overrides the
 * chain policy — used at commit time when the transaction carries a new
 * policy; an NF_DROP default policy cannot be offloaded yet, so BIND is
 * refused for it (refusal return line elided in this view).
 */
469 static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
470 enum flow_block_command cmd)
472 struct nft_base_chain *basechain;
475 if (!nft_is_base_chain(chain))
478 basechain = nft_base_chain(chain);
479 policy = ppolicy ? *ppolicy : basechain->policy;
481 /* Only default policy to accept is supported for now. */
482 if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
485 return nft_flow_block_chain(basechain, NULL, cmd);
/* Undo hardware offload work already performed by an aborted commit: walk
 * the commit list backwards from the failing transaction @trans, emitting
 * the inverse operation for each netdev-family offloaded transaction
 * (NEWCHAIN -> unbind, DELCHAIN -> rebind, NEWRULE -> destroy,
 * DELRULE -> re-add).  break statements and the FLOW_BLOCK_* / FLOW_CLS_*
 * arguments of some calls are elided in this sampled view.
 */
488 static void nft_flow_rule_offload_abort(struct net *net,
489 struct nft_trans *trans)
491 struct nftables_pernet *nft_net = nft_pernet(net);
494 list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
495 if (trans->ctx.family != NFPROTO_NETDEV)
498 switch (trans->msg_type) {
499 case NFT_MSG_NEWCHAIN:
500 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
501 nft_trans_chain_update(trans))
504 err = nft_flow_offload_chain(trans->ctx.chain, NULL,
507 case NFT_MSG_DELCHAIN:
508 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
511 err = nft_flow_offload_chain(trans->ctx.chain, NULL,
514 case NFT_MSG_NEWRULE:
515 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/* A committed new rule is removed from hardware again. */
518 err = nft_flow_offload_rule(trans->ctx.chain,
519 nft_trans_rule(trans),
520 NULL, FLOW_CLS_DESTROY);
522 case NFT_MSG_DELRULE:
523 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/* A committed delete is re-installed using the saved flow rule. */
526 err = nft_flow_offload_rule(trans->ctx.chain,
527 nft_trans_rule(trans),
528 nft_trans_flow_rule(trans),
/* If even the undo fails there is nothing left to do but warn. */
533 if (WARN_ON_ONCE(err))
/* Push a whole nftables transaction batch to hardware.  First pass mirrors
 * each netdev-family transaction into a driver operation (chain bind/unbind,
 * rule add/destroy); on the first failure the already-applied operations are
 * rolled back via nft_flow_rule_offload_abort().  The second pass releases
 * the nft_flow_rule translations that are no longer needed once the batch
 * has been pushed.  break statements, the err-check/abort glue and the
 * final return are elided in this sampled view.
 */
538 int nft_flow_rule_offload_commit(struct net *net)
540 struct nftables_pernet *nft_net = nft_pernet(net);
541 struct nft_trans *trans;
545 list_for_each_entry(trans, &nft_net->commit_list, list) {
546 if (trans->ctx.family != NFPROTO_NETDEV)
549 switch (trans->msg_type) {
550 case NFT_MSG_NEWCHAIN:
551 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
552 nft_trans_chain_update(trans))
555 policy = nft_trans_chain_policy(trans);
556 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
559 case NFT_MSG_DELCHAIN:
560 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
563 policy = nft_trans_chain_policy(trans);
564 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
567 case NFT_MSG_NEWRULE:
568 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/* Hardware offload cannot express replace or insert-at-position;
 * only plain append is supported (rejection body elided).
 */
571 if (trans->ctx.flags & NLM_F_REPLACE ||
572 !(trans->ctx.flags & NLM_F_APPEND)) {
576 err = nft_flow_offload_rule(trans->ctx.chain,
577 nft_trans_rule(trans),
578 nft_trans_flow_rule(trans),
581 case NFT_MSG_DELRULE:
582 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
585 err = nft_flow_offload_rule(trans->ctx.chain,
586 nft_trans_rule(trans),
587 NULL, FLOW_CLS_DESTROY);
/* First failure: unwind everything applied so far and bail out. */
592 nft_flow_rule_offload_abort(net, trans);
/* Second pass: the translations served their purpose — free them. */
597 list_for_each_entry(trans, &nft_net->commit_list, list) {
598 if (trans->ctx.family != NFPROTO_NETDEV)
601 switch (trans->msg_type) {
602 case NFT_MSG_NEWRULE:
603 case NFT_MSG_DELRULE:
604 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
607 nft_flow_rule_destroy(nft_trans_flow_rule(trans));
/* Find the offloaded netdev-family base chain (if any) that hooks @dev:
 * scan every netdev table, every HW-offload base chain, every hook.
 * Caller must hold the commit mutex (list walks are unlocked here).
 * The hit/return lines after the dev comparison are elided in this view.
 */
617 static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
618 struct net_device *dev)
620 struct nft_base_chain *basechain;
621 struct nft_hook *hook, *found;
622 const struct nft_table *table;
623 struct nft_chain *chain;
625 list_for_each_entry(table, &nft_net->tables, list) {
626 if (table->family != NFPROTO_NETDEV)
629 list_for_each_entry(chain, &table->chains, list) {
630 if (!nft_is_base_chain(chain) ||
631 !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
635 basechain = nft_base_chain(chain);
636 list_for_each_entry(hook, &basechain->hook_list, list) {
637 if (hook->ops.dev != dev)
/* Netdevice notifier: when a device offloaded by some chain is
 * unregistered, unbind that device's flow block (FLOW_BLOCK_UNBIND —
 * second argument elided in this view) so no stale hardware state or
 * callbacks survive the device.  Serialised by the commit mutex.
 */
653 static int nft_offload_netdev_event(struct notifier_block *this,
654 unsigned long event, void *ptr)
656 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
657 struct nftables_pernet *nft_net;
658 struct net *net = dev_net(dev);
659 struct nft_chain *chain;
/* Only device teardown matters here. */
661 if (event != NETDEV_UNREGISTER)
664 nft_net = nft_pernet(net);
665 mutex_lock(&nft_net->commit_mutex);
666 chain = __nft_offload_get_chain(nft_net, dev);
668 nft_flow_block_chain(nft_base_chain(chain), dev,
671 mutex_unlock(&nft_net->commit_mutex);
/* Notifier hooked into the netdevice chain at module init. */
676 static struct notifier_block nft_offload_netdev_notifier = {
677 .notifier_call = nft_offload_netdev_event,
/* Register the netdevice notifier; returns its error code on failure. */
680 int nft_offload_init(void)
682 return register_netdevice_notifier(&nft_offload_netdev_notifier);
/* Counterpart of nft_offload_init(): drop the netdevice notifier. */
685 void nft_offload_exit(void)
687 unregister_netdevice_notifier(&nft_offload_netdev_notifier);