1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <net/flow_offload.h>
6 #include <net/netfilter/nf_tables.h>
7 #include <net/netfilter/nf_tables_offload.h>
8 #include <net/pkt_cls.h>
/*
 * nft_flow_rule_alloc - allocate a flow rule container sized for
 * @num_actions flow actions.
 *
 * Allocates the nft_flow_rule wrapper plus the backing flow_rule, and
 * points the flow_rule's match dissector/mask/key at the storage
 * embedded in the wrapper, so the match data shares the wrapper's
 * lifetime.
 * NOTE(review): the allocation-failure checks and the return statement
 * are not visible in this chunk.
 */
10 static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
12 struct nft_flow_rule *flow;
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
18 flow->rule = flow_rule_alloc(num_actions);
24 flow->rule->match.dissector = &flow->match.dissector;
25 flow->rule->match.mask = &flow->match.mask;
26 flow->rule->match.key = &flow->match.key;
/*
 * nft_flow_rule_create - translate an nft rule into a flow offload rule.
 * @net: network namespace of the rule.
 * @rule: source nftables rule whose expressions are translated.
 *
 * First pass over the rule's expressions counts those flagged
 * NFT_OFFLOAD_F_ACTION so the action array can be sized; a rule with
 * nothing offloadable yields ERR_PTR(-EOPNOTSUPP) and allocation
 * failure yields ERR_PTR(-ENOMEM).  Second pass calls each
 * expression's ->offload() callback to populate the match and actions,
 * using a temporary nft_offload_ctx (dep.type starts at
 * NFT_OFFLOAD_DEP_UNSPEC) to track protocol dependencies between
 * expressions.  On success flow->proto is taken from the recorded
 * l3num; on an ->offload() error the partially built flow is torn down
 * via nft_flow_rule_destroy().
 * NOTE(review): the error-branch bodies, the kfree of @ctx and the
 * final return statements are not visible in this chunk.
 */
31 struct nft_flow_rule *nft_flow_rule_create(struct net *net,
32 const struct nft_rule *rule)
34 struct nft_offload_ctx *ctx;
35 struct nft_flow_rule *flow;
36 int num_actions = 0, err;
37 struct nft_expr *expr;
39 expr = nft_expr_first(rule);
40 while (expr->ops && expr != nft_expr_last(rule)) {
41 if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
44 expr = nft_expr_next(expr);
48 return ERR_PTR(-EOPNOTSUPP);
50 flow = nft_flow_rule_alloc(num_actions);
52 return ERR_PTR(-ENOMEM);
54 expr = nft_expr_first(rule);
56 ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
62 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
64 while (expr->ops && expr != nft_expr_last(rule)) {
65 if (!expr->ops->offload) {
69 err = expr->ops->offload(ctx, flow, expr);
73 expr = nft_expr_next(expr);
75 flow->proto = ctx->dep.l3num;
81 nft_flow_rule_destroy(flow);
/*
 * nft_flow_rule_destroy - release a flow rule built by
 * nft_flow_rule_create().
 *
 * Walks the action list; the visible FLOW_ACTION_REDIRECT /
 * FLOW_ACTION_MIRRED cases presumably drop the net_device reference
 * taken when the action was set up — TODO confirm, the case bodies are
 * not visible in this chunk.
 * NOTE(review): the switch statement and the frees of flow->rule and
 * @flow itself are also not visible here.
 */
86 void nft_flow_rule_destroy(struct nft_flow_rule *flow)
88 struct flow_action_entry *entry;
91 flow_action_for_each(i, entry, &flow->rule->action) {
93 case FLOW_ACTION_REDIRECT:
94 case FLOW_ACTION_MIRRED:
/*
 * nft_offload_set_dependency - record the kind of protocol dependency
 * (network/transport) that a subsequent expression is expected to
 * resolve via nft_offload_update_dependency().
 */
105 void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
106 enum nft_offload_dep_type type)
108 ctx->dep.type = type;
/*
 * nft_offload_update_dependency - resolve a pending protocol dependency.
 * @data: raw protocol value copied from the expression's comparison data.
 * @len: length of @data; warned on if it does not match the expected
 *       field width (u16 for L3, u8 for L4).
 *
 * Depending on the pending dependency type, stores the L3 protocol
 * number (dep.l3num) or the L4 protocol number (dep.protonum), then
 * clears the pending type back to NFT_OFFLOAD_DEP_UNSPEC.
 * NOTE(review): the break statements and the default case of this
 * switch are not visible in this chunk.
 */
111 void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
112 const void *data, u32 len)
114 switch (ctx->dep.type) {
115 case NFT_OFFLOAD_DEP_NETWORK:
116 WARN_ON(len != sizeof(__u16));
117 memcpy(&ctx->dep.l3num, data, sizeof(__u16));
119 case NFT_OFFLOAD_DEP_TRANSPORT:
120 WARN_ON(len != sizeof(__u8));
121 memcpy(&ctx->dep.protonum, data, sizeof(__u8));
126 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
/*
 * nft_flow_offload_common_init - fill the common part of a classifier
 * offload request: match protocol, priority and extack for driver
 * error reporting.
 */
129 static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
130 __be16 proto, int priority,
131 struct netlink_ext_ack *extack)
133 common->protocol = proto;
134 common->prio = priority;
135 common->extack = extack;
/*
 * nft_setup_cb_call - invoke every registered flow block callback in
 * @cb_list with the given setup @type and @type_data.
 * NOTE(review): how a non-zero @err from one callback affects the
 * remaining callbacks and the function's return value is not visible
 * in this chunk.
 */
138 static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
139 struct list_head *cb_list)
141 struct flow_block_cb *block_cb;
144 list_for_each_entry(block_cb, cb_list, list) {
145 err = block_cb->cb(type, type_data, block_cb->cb_priv);
/*
 * nft_chain_offload_priority - validate that a base chain's priority
 * fits the offload API: it must be positive and no larger than
 * USHRT_MAX (flow_cls_common_offload.prio is 16-bit).
 * NOTE(review): the return statements are not visible in this chunk;
 * presumably out-of-range yields a negative errno — TODO confirm.
 */
152 int nft_chain_offload_priority(struct nft_base_chain *basechain)
154 if (basechain->ops.priority <= 0 ||
155 basechain->ops.priority > USHRT_MAX)
/*
 * nft_flow_cls_offload_setup - prepare a flow_cls_offload request for a
 * rule on a base chain.
 *
 * Zeroes the request, fills the common header (protocol defaults to
 * ETH_P_ALL, priority taken from the chain's hook ops), sets the
 * command and uses the rule pointer itself as the driver-visible
 * cookie.  The translated flow_rule is attached last.
 * NOTE(review): lines between the cookie assignment and the rule
 * assignment are missing; presumably they guard against @flow being
 * NULL (e.g. for FLOW_CLS_DESTROY) and may override @proto — TODO
 * confirm.
 */
161 static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
162 const struct nft_base_chain *basechain,
163 const struct nft_rule *rule,
164 const struct nft_flow_rule *flow,
165 struct netlink_ext_ack *extack,
166 enum flow_cls_command command)
168 __be16 proto = ETH_P_ALL;
170 memset(cls_flow, 0, sizeof(*cls_flow));
175 nft_flow_offload_common_init(&cls_flow->common, proto,
176 basechain->ops.priority, extack);
177 cls_flow->command = command;
178 cls_flow->cookie = (unsigned long) rule;
180 cls_flow->rule = flow->rule;
/*
 * nft_flow_offload_rule - push a single rule add/destroy @command to
 * all drivers bound to the chain's flow block.
 *
 * Only base chains can be offloaded; the request is built with
 * nft_flow_cls_offload_setup() and dispatched as TC_SETUP_CLSFLOWER to
 * the basechain's callback list.
 * NOTE(review): the error return for the non-base-chain case is not
 * visible in this chunk.
 */
183 static int nft_flow_offload_rule(struct nft_chain *chain,
184 struct nft_rule *rule,
185 struct nft_flow_rule *flow,
186 enum flow_cls_command command)
188 struct netlink_ext_ack extack = {};
189 struct flow_cls_offload cls_flow;
190 struct nft_base_chain *basechain;
192 if (!nft_is_base_chain(chain))
195 basechain = nft_base_chain(chain);
196 nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
199 return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
200 &basechain->flow_block.cb_list);
/*
 * nft_flow_offload_bind - adopt the driver callbacks collected during
 * block setup by splicing them onto the base chain's flow block list.
 * NOTE(review): the return statement is not visible in this chunk.
 */
203 static int nft_flow_offload_bind(struct flow_block_offload *bo,
204 struct nft_base_chain *basechain)
206 list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
/*
 * nft_flow_offload_unbind - tear down hardware state when a block is
 * unbound.
 *
 * For every rule still on the chain, issue a FLOW_CLS_DESTROY request
 * to the callbacks on @bo->cb_list so drivers drop their copies, then
 * unlink and free each remaining flow_block_cb.
 * NOTE(review): the return statement is not visible in this chunk.
 */
210 static int nft_flow_offload_unbind(struct flow_block_offload *bo,
211 struct nft_base_chain *basechain)
213 struct flow_block_cb *block_cb, *next;
214 struct flow_cls_offload cls_flow;
215 struct netlink_ext_ack extack;
216 struct nft_chain *chain;
217 struct nft_rule *rule;
219 chain = &basechain->chain;
220 list_for_each_entry(rule, &chain->rules, list) {
221 memset(&extack, 0, sizeof(extack));
222 nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
223 &extack, FLOW_CLS_DESTROY);
224 nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
227 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
228 list_del(&block_cb->list);
229 flow_block_cb_free(block_cb);
/*
 * nft_block_setup - finish a block offload command by dispatching to
 * the bind or unbind handler.
 * NOTE(review): the switch statement, break statements, default case
 * and return are not visible in this chunk.
 */
235 static int nft_block_setup(struct nft_base_chain *basechain,
236 struct flow_block_offload *bo,
237 enum flow_block_command cmd)
242 case FLOW_BLOCK_BIND:
243 err = nft_flow_offload_bind(bo, basechain);
245 case FLOW_BLOCK_UNBIND:
246 err = nft_flow_offload_unbind(bo, basechain);
/*
 * nft_flow_block_offload_init - initialize a flow_block_offload request
 * for this base chain: zeroed, pointed at the chain's flow block,
 * marked as a clsact-ingress binder, with an empty callback list for
 * drivers to populate.
 * NOTE(review): assignments of the command/net/extack fields (the
 * elided lines) are not visible in this chunk.
 */
256 static void nft_flow_block_offload_init(struct flow_block_offload *bo,
258 enum flow_block_command cmd,
259 struct nft_base_chain *basechain,
260 struct netlink_ext_ack *extack)
262 memset(bo, 0, sizeof(*bo));
264 bo->block = &basechain->flow_block;
266 bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
268 INIT_LIST_HEAD(&bo->cb_list);
/*
 * nft_block_offload_cmd - direct offload path for devices that
 * implement ndo_setup_tc: build the block request, hand it to the
 * driver via TC_SETUP_BLOCK, then complete bind/unbind bookkeeping
 * with nft_block_setup().
 * NOTE(review): the error check between the ndo_setup_tc call and
 * nft_block_setup() is not visible in this chunk.
 */
271 static int nft_block_offload_cmd(struct nft_base_chain *chain,
272 struct net_device *dev,
273 enum flow_block_command cmd)
275 struct netlink_ext_ack extack = {};
276 struct flow_block_offload bo;
279 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
281 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
285 return nft_block_setup(chain, &bo, cmd);
/*
 * nft_indr_block_cleanup - cleanup callback invoked when an indirect
 * block driver goes away.
 *
 * Rebuilds an UNBIND request for the base chain stashed in the
 * callback's indirect data, moves the callback onto the request's list
 * and unbinds it, all under the namespace's nftables commit mutex so
 * it cannot race a concurrent transaction.
 */
288 static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
290 struct nft_base_chain *basechain = block_cb->indr.data;
291 struct net_device *dev = block_cb->indr.dev;
292 struct netlink_ext_ack extack = {};
293 struct net *net = dev_net(dev);
294 struct flow_block_offload bo;
296 nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
298 mutex_lock(&net->nft.commit_mutex);
299 list_del(&block_cb->driver_list);
300 list_move(&block_cb->list, &bo.cb_list);
301 nft_flow_offload_unbind(&bo, basechain);
302 mutex_unlock(&net->nft.commit_mutex);
/*
 * nft_indr_block_offload_cmd - indirect offload path for devices
 * without ndo_setup_tc (e.g. behind tunnel devices): route the block
 * request through the flow_indr_dev infrastructure, registering
 * nft_indr_block_cleanup() for driver-side teardown.
 *
 * If no driver registered a callback (cb_list stays empty) there is
 * nothing to set up; NOTE(review) the return value for that case and
 * the error check after flow_indr_dev_setup_offload() are not visible
 * in this chunk.
 */
305 static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
306 struct net_device *dev,
307 enum flow_block_command cmd)
309 struct netlink_ext_ack extack = {};
310 struct flow_block_offload bo;
313 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
315 err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
316 nft_indr_block_cleanup);
320 if (list_empty(&bo.cb_list))
323 return nft_block_setup(basechain, &bo, cmd);
/* Alias kept for readability: nftables reuses the tc block setup type. */
326 #define FLOW_SETUP_BLOCK TC_SETUP_BLOCK

/*
 * nft_chain_offload_cmd - bind/unbind one device to/from the chain's
 * flow block, choosing the direct ndo_setup_tc path when the device
 * implements it and the indirect (flow_indr) path otherwise.
 * NOTE(review): the else keyword and return statement are not visible
 * in this chunk.
 */
328 static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
329 struct net_device *dev,
330 enum flow_block_command cmd)
334 if (dev->netdev_ops->ndo_setup_tc)
335 err = nft_block_offload_cmd(basechain, dev, cmd);
337 err = nft_indr_block_offload_cmd(basechain, dev, cmd);
/*
 * nft_flow_block_chain - apply a block @cmd to the chain's hook devices.
 * @this_dev: when non-NULL, restrict the operation to this device only
 *            (used on netdevice unregister); NULL means all hooks.
 *
 * On a bind failure the second loop re-walks the hook list and unbinds
 * the devices that were already bound, rolling back to a clean state.
 * NOTE(review): the assignments of @dev from each hook, the error
 * bookkeeping and the return statements are not visible in this chunk.
 */
342 static int nft_flow_block_chain(struct nft_base_chain *basechain,
343 const struct net_device *this_dev,
344 enum flow_block_command cmd)
346 struct net_device *dev;
347 struct nft_hook *hook;
350 list_for_each_entry(hook, &basechain->hook_list, list) {
352 if (this_dev && this_dev != dev)
355 err = nft_chain_offload_cmd(basechain, dev, cmd);
356 if (err < 0 && cmd == FLOW_BLOCK_BIND) {
368 list_for_each_entry(hook, &basechain->hook_list, list) {
373 nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
/*
 * nft_flow_offload_chain - offload (or un-offload) an entire chain.
 * @ppolicy: optional override of the chain policy; NULL means use the
 *           base chain's configured policy.
 *
 * Only base chains are offloadable, and only with a default-accept
 * policy: binding a drop-policy chain is rejected.  Delegates the
 * per-device work to nft_flow_block_chain() across all hooks.
 * NOTE(review): the error returns for the rejected cases and the
 * declaration of the local policy variable are not visible in this
 * chunk.
 */
378 static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
379 enum flow_block_command cmd)
381 struct nft_base_chain *basechain;
384 if (!nft_is_base_chain(chain))
387 basechain = nft_base_chain(chain);
388 policy = ppolicy ? *ppolicy : basechain->policy;
390 /* Only default policy to accept is supported for now. */
391 if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
394 return nft_flow_block_chain(basechain, NULL, cmd);
/*
 * nft_flow_rule_offload_abort - undo hardware offloads after a failed
 * commit.
 *
 * Walks the commit list backwards from the transaction that failed,
 * applying the inverse operation of each already-committed NETDEV
 * transaction: a new chain is unbound, a deleted chain is re-bound, a
 * new rule is destroyed in hardware and a deleted rule is re-created
 * from its stashed flow rule.  A failure while rolling back is itself
 * unrecoverable, hence the WARN_ON_ONCE.
 * NOTE(review): the break statements, the FLOW_BLOCK_* / FLOW_CLS_*
 * command arguments on the split calls and the loop-exit handling are
 * not visible in this chunk.
 */
397 static void nft_flow_rule_offload_abort(struct net *net,
398 struct nft_trans *trans)
402 list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
403 if (trans->ctx.family != NFPROTO_NETDEV)
406 switch (trans->msg_type) {
407 case NFT_MSG_NEWCHAIN:
408 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
409 nft_trans_chain_update(trans))
412 err = nft_flow_offload_chain(trans->ctx.chain, NULL,
415 case NFT_MSG_DELCHAIN:
416 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
419 err = nft_flow_offload_chain(trans->ctx.chain, NULL,
422 case NFT_MSG_NEWRULE:
423 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
426 err = nft_flow_offload_rule(trans->ctx.chain,
427 nft_trans_rule(trans),
428 NULL, FLOW_CLS_DESTROY);
430 case NFT_MSG_DELRULE:
431 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
434 err = nft_flow_offload_rule(trans->ctx.chain,
435 nft_trans_rule(trans),
436 nft_trans_flow_rule(trans),
441 if (WARN_ON_ONCE(err))
/*
 * nft_flow_rule_offload_commit - push a whole nftables transaction to
 * hardware.
 *
 * First loop mirrors each NETDEV-family transaction into the drivers:
 * new chains are bound (with their pending policy), deleted chains
 * unbound, new rules added and deleted rules destroyed.  Rule inserts
 * that are replacements, or not plain appends, are rejected — only
 * append semantics map onto the offload API.  On any failure the
 * already-applied transactions are rolled back via
 * nft_flow_rule_offload_abort().  Second loop frees the translated
 * flow rules that were carried in the rule transactions, now that the
 * drivers have consumed them.
 * NOTE(review): the break statements, continue statements, the
 * FLOW_BLOCK_* / FLOW_CLS_* command arguments on the split calls, the
 * -EOPNOTSUPP path for replace/insert and the final return are not
 * visible in this chunk.
 */
446 int nft_flow_rule_offload_commit(struct net *net)
448 struct nft_trans *trans;
452 list_for_each_entry(trans, &net->nft.commit_list, list) {
453 if (trans->ctx.family != NFPROTO_NETDEV)
456 switch (trans->msg_type) {
457 case NFT_MSG_NEWCHAIN:
458 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
459 nft_trans_chain_update(trans))
462 policy = nft_trans_chain_policy(trans);
463 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
466 case NFT_MSG_DELCHAIN:
467 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
470 policy = nft_trans_chain_policy(trans);
471 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
474 case NFT_MSG_NEWRULE:
475 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
478 if (trans->ctx.flags & NLM_F_REPLACE ||
479 !(trans->ctx.flags & NLM_F_APPEND)) {
483 err = nft_flow_offload_rule(trans->ctx.chain,
484 nft_trans_rule(trans),
485 nft_trans_flow_rule(trans),
488 case NFT_MSG_DELRULE:
489 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
492 err = nft_flow_offload_rule(trans->ctx.chain,
493 nft_trans_rule(trans),
494 NULL, FLOW_CLS_DESTROY);
499 nft_flow_rule_offload_abort(net, trans);
504 list_for_each_entry(trans, &net->nft.commit_list, list) {
505 if (trans->ctx.family != NFPROTO_NETDEV)
508 switch (trans->msg_type) {
509 case NFT_MSG_NEWRULE:
510 case NFT_MSG_DELRULE:
511 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
514 nft_flow_rule_destroy(nft_trans_flow_rule(trans));
/*
 * __nft_offload_get_chain - find an offloaded netdev base chain hooked
 * on @dev.
 *
 * Scans every NFPROTO_NETDEV table in the device's namespace for a
 * base chain with NFT_CHAIN_HW_OFFLOAD whose hook list contains @dev.
 * Caller must hold the commit mutex (lockless list walks).
 * NOTE(review): the code that records the matching hook in @found and
 * the return statements are not visible in this chunk; presumably a
 * match returns the chain and no match returns NULL — TODO confirm.
 */
524 static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
526 struct nft_base_chain *basechain;
527 struct net *net = dev_net(dev);
528 struct nft_hook *hook, *found;
529 const struct nft_table *table;
530 struct nft_chain *chain;
532 list_for_each_entry(table, &net->nft.tables, list) {
533 if (table->family != NFPROTO_NETDEV)
536 list_for_each_entry(chain, &table->chains, list) {
537 if (!nft_is_base_chain(chain) ||
538 !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
542 basechain = nft_base_chain(chain);
543 list_for_each_entry(hook, &basechain->hook_list, list) {
544 if (hook->ops.dev != dev)
/*
 * nft_offload_netdev_event - netdevice notifier: when a device with an
 * offloaded base chain is unregistered, unbind the chain's flow block
 * from that device so no stale hardware state remains.  The lookup and
 * unbind happen under the namespace's commit mutex.
 * NOTE(review): the NULL check on @chain, the FLOW_BLOCK_UNBIND
 * argument continuation and the NOTIFY_* return values are not visible
 * in this chunk.
 */
560 static int nft_offload_netdev_event(struct notifier_block *this,
561 unsigned long event, void *ptr)
563 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
564 struct net *net = dev_net(dev);
565 struct nft_chain *chain;
567 if (event != NETDEV_UNREGISTER)
570 mutex_lock(&net->nft.commit_mutex);
571 chain = __nft_offload_get_chain(dev);
573 nft_flow_block_chain(nft_base_chain(chain), dev,
576 mutex_unlock(&net->nft.commit_mutex);
/* Notifier block hooking nft offload teardown into netdevice events. */
581 static struct notifier_block nft_offload_netdev_notifier = {
582 .notifier_call = nft_offload_netdev_event,
/* Register the netdevice notifier at nf_tables init time. */
585 int nft_offload_init(void)
587 return register_netdevice_notifier(&nft_offload_netdev_notifier);
/* Unregister the netdevice notifier on nf_tables exit. */
590 void nft_offload_exit(void)
592 unregister_netdevice_notifier(&nft_offload_netdev_notifier);