1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <net/flow_offload.h>
6 #include <net/netfilter/nf_tables.h>
7 #include <net/netfilter/nf_tables_offload.h>
8 #include <net/pkt_cls.h>
/*
 * Allocate a nft_flow_rule together with its embedded flow_rule sized
 * for @num_actions actions, and point the flow_rule's match triplet
 * (dissector/mask/key) at the storage embedded in the nft_flow_rule.
 * NOTE(review): the NULL checks and error unwind between these
 * allocations are elided in this excerpt — verify against the full file.
 */
10 static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
12 struct nft_flow_rule *flow;
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
18 flow->rule = flow_rule_alloc(num_actions);
/* Wire the generic flow_rule match at this rule's own match storage. */
24 flow->rule->match.dissector = &flow->match.dissector;
25 flow->rule->match.mask = &flow->match.mask;
26 flow->rule->match.key = &flow->match.key;
/*
 * Record the L3 address family (FLOW_DISSECTOR_KEY_CONTROL addr_type)
 * in @flow's match key, with a fully-set mask, and register the CONTROL
 * key in the dissector's used_keys/offset tables.
 */
31 void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
32 enum flow_dissector_key_id addr_type)
34 struct nft_flow_match *match = &flow->match;
35 struct nft_flow_key *mask = &match->mask;
36 struct nft_flow_key *key = &match->key;
/*
 * Guard: CONTROL already populated. Body of this guard is elided in
 * this excerpt (presumably an early return — verify).
 */
38 if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
41 key->control.addr_type = addr_type;
/* Match addr_type exactly: all 16 mask bits set. */
42 mask->control.addr_type = 0xffff;
43 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
44 match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
45 offsetof(struct nft_flow_key, control);
/*
 * Scratch value/mask pair holding the basic.n_proto ethertype while
 * nft_flow_rule_transfer_vlan() rotates it through the VLAN TPID
 * fields. Members (value, mask — see use below) are elided in this
 * excerpt.
 */
48 struct nft_offload_ethertype {
/*
 * Rotate the ethertype chain to match what flower/drivers expect:
 * nft parses VLAN headers "outside-in" (basic.n_proto holds the
 * outermost ethertype), while flow offload wants basic.n_proto to be
 * the innermost protocol with the TPIDs pushed into the VLAN/CVLAN
 * keys.  Two cases:
 *   - QinQ (VLAN key present with an 802.1Q/802.1AD TPID): shift
 *     cvlan -> basic, vlan -> cvlan, saved ethertype -> vlan, and
 *     register the CVLAN dissector key.
 *   - single VLAN (basic.n_proto itself is 802.1Q/802.1AD): shift
 *     vlan -> basic, saved ethertype -> vlan, and register the VLAN
 *     dissector key.
 */
53 static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
54 struct nft_flow_rule *flow)
56 struct nft_flow_match *match = &flow->match;
/* Save the original outermost ethertype before rotating. */
57 struct nft_offload_ethertype ethertype = {
58 .value = match->key.basic.n_proto,
59 .mask = match->mask.basic.n_proto,
62 if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
63 (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
64 match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
65 match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
66 match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
67 match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
68 match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
69 match->key.vlan.vlan_tpid = ethertype.value;
70 match->mask.vlan.vlan_tpid = ethertype.mask;
71 match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
72 offsetof(struct nft_flow_key, cvlan);
73 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
74 } else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
75 (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
76 match->key.basic.n_proto == htons(ETH_P_8021AD))) {
77 match->key.basic.n_proto = match->key.vlan.vlan_tpid;
78 match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
79 match->key.vlan.vlan_tpid = ethertype.value;
80 match->mask.vlan.vlan_tpid = ethertype.mask;
81 match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
82 offsetof(struct nft_flow_key, vlan);
83 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
/*
 * Translate an nft_rule into a flow offload representation:
 *   1. count expressions that contribute offloadable actions,
 *   2. allocate the nft_flow_rule sized for that action count,
 *   3. run every expression's ->offload() callback against a fresh
 *      nft_offload_ctx to fill match + actions,
 *   4. fix up the VLAN/ethertype ordering and record the L3 protocol
 *      collected in ctx->dep.l3num.
 * Returns the new rule or an ERR_PTR.  NOTE(review): several error
 * checks/unwind lines (e.g. expr without ->offload, ->offload()
 * failure path freeing ctx/flow) are elided in this excerpt.
 */
87 struct nft_flow_rule *nft_flow_rule_create(struct net *net,
88 const struct nft_rule *rule)
90 struct nft_offload_ctx *ctx;
91 struct nft_flow_rule *flow;
92 int num_actions = 0, err;
93 struct nft_expr *expr;
/* First pass: count how many actions the expressions will emit. */
95 expr = nft_expr_first(rule);
96 while (nft_expr_more(rule, expr)) {
97 if (expr->ops->offload_action &&
98 expr->ops->offload_action(expr))
101 expr = nft_expr_next(expr);
/* Nothing to offload at all: reject early. */
104 if (num_actions == 0)
105 return ERR_PTR(-EOPNOTSUPP);
107 flow = nft_flow_rule_alloc(num_actions);
109 return ERR_PTR(-ENOMEM);
/* Second pass: let each expression fill in match/actions. */
111 expr = nft_expr_first(rule);
113 ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
119 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
121 while (nft_expr_more(rule, expr)) {
/* Expressions without an ->offload() op cannot be offloaded. */
122 if (!expr->ops->offload) {
126 err = expr->ops->offload(ctx, flow, expr);
130 expr = nft_expr_next(expr);
132 nft_flow_rule_transfer_vlan(ctx, flow);
/* L3 protocol accumulated via nft_offload_update_dependency(). */
134 flow->proto = ctx->dep.l3num;
/* Error unwind label — reached from the elided failure paths above. */
140 nft_flow_rule_destroy(flow);
/*
 * Release a nft_flow_rule.  Walks the action list to drop per-action
 * references for device-redirecting actions (REDIRECT/MIRRED) before
 * freeing.  NOTE(review): the switch body and the final kfree()s are
 * elided in this excerpt — verify against the full file.
 */
145 void nft_flow_rule_destroy(struct nft_flow_rule *flow)
147 struct flow_action_entry *entry;
150 flow_action_for_each(i, entry, &flow->rule->action) {
152 case FLOW_ACTION_REDIRECT:
153 case FLOW_ACTION_MIRRED:
/*
 * Declare which protocol layer the next nft_offload_update_dependency()
 * call refers to (network vs. transport).
 */
164 void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
165 enum nft_offload_dep_type type)
167 ctx->dep.type = type;
/*
 * Store the protocol value for the previously declared dependency:
 * a 16-bit L3 protocol number for NETWORK, an 8-bit transport protocol
 * for TRANSPORT.  Resets the dependency type to UNSPEC afterwards so a
 * stale type is never reused.  The WARN_ONs catch callers passing a
 * mismatched length.
 */
170 void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
171 const void *data, u32 len)
173 switch (ctx->dep.type) {
174 case NFT_OFFLOAD_DEP_NETWORK:
175 WARN_ON(len != sizeof(__u16));
176 memcpy(&ctx->dep.l3num, data, sizeof(__u16));
/* break elided in this excerpt */
178 case NFT_OFFLOAD_DEP_TRANSPORT:
179 WARN_ON(len != sizeof(__u8));
180 memcpy(&ctx->dep.protonum, data, sizeof(__u8));
/* Consume the dependency: next update needs a fresh set_dependency(). */
185 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
/*
 * Fill the common part of a flow_cls offload request: match protocol,
 * priority (from the base chain ops), and the extack for error
 * reporting back to the driver.
 */
188 static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
189 __be16 proto, int priority,
190 struct netlink_ext_ack *extack)
192 common->protocol = proto;
193 common->prio = priority;
194 common->extack = extack;
/*
 * Invoke every flow block callback registered on @cb_list with the
 * given setup type/data.  NOTE(review): the per-callback error check
 * and the final return are elided in this excerpt.
 */
197 static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
198 struct list_head *cb_list)
200 struct flow_block_cb *block_cb;
203 list_for_each_entry(block_cb, cb_list, list) {
204 err = block_cb->cb(type, type_data, block_cb->cb_priv);
/*
 * Validate that the base chain priority fits the range usable as a
 * flower classifier priority: it must be positive and fit in a u16.
 * Return statements are elided in this excerpt (presumably -EOPNOTSUPP
 * on violation, 0 otherwise — verify).
 */
211 int nft_chain_offload_priority(struct nft_base_chain *basechain)
213 if (basechain->ops.priority <= 0 ||
214 basechain->ops.priority > USHRT_MAX)
/*
 * Prepare a flow_cls_offload request for @rule on @basechain: zero the
 * struct, fill the common header (protocol/priority/extack), record
 * the command and use the rule pointer as the driver-visible cookie.
 * The flow rule payload is only attached when @flow is given (elided
 * NULL check before line 239, it seems — verify).
 */
220 static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
221 const struct nft_base_chain *basechain,
222 const struct nft_rule *rule,
223 const struct nft_flow_rule *flow,
224 struct netlink_ext_ack *extack,
225 enum flow_cls_command command)
/* Default when no specific L3 protocol was derived from the rule. */
227 __be16 proto = ETH_P_ALL;
229 memset(cls_flow, 0, sizeof(*cls_flow));
234 nft_flow_offload_common_init(&cls_flow->common, proto,
235 basechain->ops.priority, extack);
236 cls_flow->command = command;
/* The rule pointer doubles as the unique cookie drivers key on. */
237 cls_flow->cookie = (unsigned long) rule;
239 cls_flow->rule = flow->rule;
/*
 * Build a flow_cls_offload request for @rule and dispatch it to all
 * callbacks bound to the chain's flow block.  Only base chains can be
 * offloaded (guard body elided in this excerpt — presumably returns
 * -EOPNOTSUPP; the command argument to the setup call is also elided).
 */
242 static int nft_flow_offload_cmd(const struct nft_chain *chain,
243 const struct nft_rule *rule,
244 struct nft_flow_rule *flow,
245 enum flow_cls_command command,
246 struct flow_cls_offload *cls_flow)
248 struct netlink_ext_ack extack = {};
249 struct nft_base_chain *basechain;
251 if (!nft_is_base_chain(chain))
254 basechain = nft_base_chain(chain);
255 nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
258 return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
259 &basechain->flow_block.cb_list);
/*
 * Thin wrapper around nft_flow_offload_cmd() providing a stack
 * flow_cls_offload for callers that do not need the reply contents.
 */
262 static int nft_flow_offload_rule(const struct nft_chain *chain,
263 struct nft_rule *rule,
264 struct nft_flow_rule *flow,
265 enum flow_cls_command command)
267 struct flow_cls_offload cls_flow;
269 return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
/*
 * Query hardware stats for @rule (FLOW_CLS_STATS) and feed the
 * returned counters to every expression that implements
 * ->offload_stats() (e.g. counters).  NOTE(review): the error check
 * after the stats command and the final return are elided in this
 * excerpt.
 */
272 int nft_flow_rule_stats(const struct nft_chain *chain,
273 const struct nft_rule *rule)
275 struct flow_cls_offload cls_flow = {};
276 struct nft_expr *expr, *next;
279 err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
284 nft_rule_for_each_expr(expr, next, rule) {
285 if (expr->ops->offload_stats)
286 expr->ops->offload_stats(expr, &cls_flow.stats);
/*
 * Adopt the driver-provided callbacks: move them from the bind
 * request onto the base chain's flow block callback list.
 */
292 static int nft_flow_offload_bind(struct flow_block_offload *bo,
293 struct nft_base_chain *basechain)
295 list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
/*
 * Tear down a flow block binding: first ask the driver to destroy
 * every rule still installed on the chain (FLOW_CLS_DESTROY per rule,
 * dispatched through the callbacks being removed), then unlink and
 * free each flow_block_cb.
 */
299 static int nft_flow_offload_unbind(struct flow_block_offload *bo,
300 struct nft_base_chain *basechain)
302 struct flow_block_cb *block_cb, *next;
303 struct flow_cls_offload cls_flow;
304 struct netlink_ext_ack extack;
305 struct nft_chain *chain;
306 struct nft_rule *rule;
308 chain = &basechain->chain;
309 list_for_each_entry(rule, &chain->rules, list) {
/* Fresh extack per rule so driver messages don't accumulate. */
310 memset(&extack, 0, sizeof(extack));
311 nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
312 &extack, FLOW_CLS_DESTROY);
313 nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
/* _safe variant: entries are deleted while walking. */
316 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
317 list_del(&block_cb->list);
318 flow_block_cb_free(block_cb);
/*
 * Finish a flow block command after the driver accepted it:
 * splice in callbacks on BIND, destroy rules and free callbacks on
 * UNBIND.  The switch header, default case and final return are
 * elided in this excerpt.
 */
324 static int nft_block_setup(struct nft_base_chain *basechain,
325 struct flow_block_offload *bo,
326 enum flow_block_command cmd)
331 case FLOW_BLOCK_BIND:
332 err = nft_flow_offload_bind(bo, basechain);
334 case FLOW_BLOCK_UNBIND:
335 err = nft_flow_offload_unbind(bo, basechain);
/*
 * Initialize a flow_block_offload request for this base chain:
 * ingress CLSACT binder type, block/cb_list_head pointing at the
 * chain's flow block, and an empty cb_list for the driver to fill.
 * NOTE(review): assignments of net/command/extack (and the `net`
 * parameter line) are elided in this excerpt.
 */
345 static void nft_flow_block_offload_init(struct flow_block_offload *bo,
347 enum flow_block_command cmd,
348 struct nft_base_chain *basechain,
349 struct netlink_ext_ack *extack)
351 memset(bo, 0, sizeof(*bo));
353 bo->block = &basechain->flow_block;
355 bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
357 bo->cb_list_head = &basechain->flow_block.cb_list;
358 INIT_LIST_HEAD(&bo->cb_list);
/*
 * Direct (non-indirect) block offload: ask the device's own
 * ndo_setup_tc() to bind/unbind the chain's flow block, then complete
 * the command via nft_block_setup().  The error check after
 * ndo_setup_tc() is elided in this excerpt.
 */
361 static int nft_block_offload_cmd(struct nft_base_chain *chain,
362 struct net_device *dev,
363 enum flow_block_command cmd)
365 struct netlink_ext_ack extack = {};
366 struct flow_block_offload bo;
369 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
371 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
375 return nft_block_setup(chain, &bo, cmd);
/*
 * Cleanup callback handed to flow_indr_dev_setup_offload(): invoked
 * when an indirect-offload driver goes away.  Rebuilds an UNBIND
 * request, moves the dying callback onto it and runs the normal
 * unbind path under the nftables commit mutex so it cannot race a
 * transaction.
 */
378 static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
380 struct nft_base_chain *basechain = block_cb->indr.data;
381 struct net_device *dev = block_cb->indr.dev;
382 struct netlink_ext_ack extack = {};
383 struct nftables_pernet *nft_net;
384 struct net *net = dev_net(dev);
385 struct flow_block_offload bo;
387 nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
389 nft_net = nft_pernet(net);
390 mutex_lock(&nft_net->commit_mutex);
/* Detach from the driver's indirect list, then unbind as usual. */
391 list_del(&block_cb->driver_list);
392 list_move(&block_cb->list, &bo.cb_list);
393 nft_flow_offload_unbind(&bo, basechain);
394 mutex_unlock(&nft_net->commit_mutex);
/*
 * Indirect block offload path for devices without ndo_setup_tc()
 * (e.g. tunnel devices): route the request through the indirect
 * device offload registry.  If no driver registered a callback the
 * cb_list stays empty (the guard's return is elided in this excerpt).
 */
397 static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
398 struct net_device *dev,
399 enum flow_block_command cmd)
401 struct netlink_ext_ack extack = {};
402 struct flow_block_offload bo;
405 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
407 err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
408 nft_indr_block_cleanup);
412 if (list_empty(&bo.cb_list))
415 return nft_block_setup(basechain, &bo, cmd);
/*
 * Dispatch a block command to the right mechanism: the device's own
 * ndo_setup_tc() when it has one, otherwise the indirect offload
 * path.  The else keyword and final return are elided in this
 * excerpt.
 */
418 static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
419 struct net_device *dev,
420 enum flow_block_command cmd)
424 if (dev->netdev_ops->ndo_setup_tc)
425 err = nft_block_offload_cmd(basechain, dev, cmd);
427 err = nft_indr_block_offload_cmd(basechain, dev, cmd);
/*
 * Apply a block command to every hook device of @basechain, or to
 * @this_dev only when it is non-NULL.  If a BIND fails partway, a
 * second walk UNBINDs the devices already bound (the bookkeeping that
 * stops the unwind at the failing hook is elided in this excerpt).
 */
432 static int nft_flow_block_chain(struct nft_base_chain *basechain,
433 const struct net_device *this_dev,
434 enum flow_block_command cmd)
436 struct net_device *dev;
437 struct nft_hook *hook;
440 list_for_each_entry(hook, &basechain->hook_list, list) {
/* Restrict to a single device when the caller asked for one. */
442 if (this_dev && this_dev != dev)
445 err = nft_chain_offload_cmd(basechain, dev, cmd);
446 if (err < 0 && cmd == FLOW_BLOCK_BIND) {
/* Unwind: unbind everything bound before the failure. */
458 list_for_each_entry(hook, &basechain->hook_list, list) {
463 nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
/*
 * Bind or unbind hardware offload for a whole chain.  Only base
 * chains qualify, and a chain whose effective policy is NF_DROP
 * cannot be bound (only accept-policy chains are supported for now).
 * @ppolicy overrides the chain's stored policy when non-NULL (used
 * during commit, where the new policy is in the transaction).
 */
468 static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
469 enum flow_block_command cmd)
471 struct nft_base_chain *basechain;
474 if (!nft_is_base_chain(chain))
477 basechain = nft_base_chain(chain);
478 policy = ppolicy ? *ppolicy : basechain->policy;
480 /* Only default policy to accept is supported for now. */
481 if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
484 return nft_flow_block_chain(basechain, NULL, cmd);
/*
 * Roll back hardware state after a failed offload commit: walk the
 * commit list backwards from the failing transaction and apply the
 * inverse operation for each netdev-family entry (NEWCHAIN -> unbind,
 * DELCHAIN -> rebind, NEWRULE -> destroy, DELRULE -> re-add).  A
 * failure here is unrecoverable, hence the WARN_ON_ONCE.
 * NOTE(review): several `continue`/`break` lines and the exact
 * commands passed to the chain calls are elided in this excerpt.
 */
487 static void nft_flow_rule_offload_abort(struct net *net,
488 struct nft_trans *trans)
490 struct nftables_pernet *nft_net = nft_pernet(net);
493 list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
/* Only netdev-family chains can carry hardware offload. */
494 if (trans->ctx.family != NFPROTO_NETDEV)
497 switch (trans->msg_type) {
498 case NFT_MSG_NEWCHAIN:
499 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
500 nft_trans_chain_update(trans))
503 err = nft_flow_offload_chain(trans->ctx.chain, NULL,
506 case NFT_MSG_DELCHAIN:
507 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
510 err = nft_flow_offload_chain(trans->ctx.chain, NULL,
513 case NFT_MSG_NEWRULE:
514 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/* Undo a rule add: tell the driver to destroy it. */
517 err = nft_flow_offload_rule(trans->ctx.chain,
518 nft_trans_rule(trans),
519 NULL, FLOW_CLS_DESTROY);
521 case NFT_MSG_DELRULE:
522 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/* Undo a rule delete: re-install its flow rule. */
525 err = nft_flow_offload_rule(trans->ctx.chain,
526 nft_trans_rule(trans),
527 nft_trans_flow_rule(trans),
532 if (WARN_ON_ONCE(err))
/*
 * Push the pending nftables transaction to hardware: walk the commit
 * list in order and, for every netdev-family entry on an offloaded
 * chain, issue the matching flow block / flower command (NEWCHAIN ->
 * bind, DELCHAIN -> unbind, NEWRULE -> replace, DELRULE -> destroy).
 * On the first failure the already-applied entries are rolled back
 * via nft_flow_rule_offload_abort().  NOTE(review): `continue`/`break`
 * lines, the exact command constants and the final return are elided
 * in this excerpt.
 */
537 int nft_flow_rule_offload_commit(struct net *net)
539 struct nftables_pernet *nft_net = nft_pernet(net);
540 struct nft_trans *trans;
544 list_for_each_entry(trans, &nft_net->commit_list, list) {
545 if (trans->ctx.family != NFPROTO_NETDEV)
548 switch (trans->msg_type) {
549 case NFT_MSG_NEWCHAIN:
550 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
551 nft_trans_chain_update(trans))
554 policy = nft_trans_chain_policy(trans);
555 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
558 case NFT_MSG_DELCHAIN:
559 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
562 policy = nft_trans_chain_policy(trans);
563 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
566 case NFT_MSG_NEWRULE:
567 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/*
 * Hardware cannot express rule position: only plain
 * appends are offloadable (guard body elided here).
 */
570 if (trans->ctx.flags & NLM_F_REPLACE ||
571 !(trans->ctx.flags & NLM_F_APPEND)) {
575 err = nft_flow_offload_rule(trans->ctx.chain,
576 nft_trans_rule(trans),
577 nft_trans_flow_rule(trans),
580 case NFT_MSG_DELRULE:
581 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
584 err = nft_flow_offload_rule(trans->ctx.chain,
585 nft_trans_rule(trans),
586 NULL, FLOW_CLS_DESTROY);
/* First failure: undo everything applied so far. */
591 nft_flow_rule_offload_abort(net, trans);
/*
 * Find the offloaded netdev base chain hooked on @dev, if any:
 * scan every netdev-family table, every HW-offloaded base chain, and
 * every hook for a matching device.  Caller must hold the commit
 * mutex (per the callers in this file).  NOTE(review): the lines that
 * record/return the match (and the `found` bookkeeping) are elided in
 * this excerpt.
 */
599 static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
600 struct net_device *dev)
602 struct nft_base_chain *basechain;
603 struct nft_hook *hook, *found;
604 const struct nft_table *table;
605 struct nft_chain *chain;
607 list_for_each_entry(table, &nft_net->tables, list) {
608 if (table->family != NFPROTO_NETDEV)
611 list_for_each_entry(chain, &table->chains, list) {
612 if (!nft_is_base_chain(chain) ||
613 !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
617 basechain = nft_base_chain(chain);
618 list_for_each_entry(hook, &basechain->hook_list, list) {
619 if (hook->ops.dev != dev)
/*
 * Netdevice notifier: when a device with an offloaded chain is
 * unregistered, unbind that chain's flow block for this device so no
 * stale callbacks remain.  Runs under the per-netns commit mutex.
 * NOTE(review): the NULL check on the looked-up chain, the command
 * argument and the final NOTIFY_DONE return are elided in this
 * excerpt.
 */
635 static int nft_offload_netdev_event(struct notifier_block *this,
636 unsigned long event, void *ptr)
638 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
639 struct nftables_pernet *nft_net;
640 struct net *net = dev_net(dev);
641 struct nft_chain *chain;
/* Only device teardown is interesting here. */
643 if (event != NETDEV_UNREGISTER)
646 nft_net = nft_pernet(net);
647 mutex_lock(&nft_net->commit_mutex);
648 chain = __nft_offload_get_chain(nft_net, dev);
650 nft_flow_block_chain(nft_base_chain(chain), dev,
653 mutex_unlock(&nft_net->commit_mutex);
/* Notifier watching for NETDEV_UNREGISTER (see handler above). */
658 static struct notifier_block nft_offload_netdev_notifier = {
659 .notifier_call = nft_offload_netdev_event,
/* Register the netdevice notifier; called from nf_tables module init. */
662 int nft_offload_init(void)
664 return register_netdevice_notifier(&nft_offload_netdev_notifier);
/* Unregister the netdevice notifier; called from nf_tables module exit. */
667 void nft_offload_exit(void)
669 unregister_netdevice_notifier(&nft_offload_netdev_notifier);