// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static struct tc_action_ops act_skbedit_ops;

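/* Resolve the TX queue for a packet. With SKBEDIT_F_TXQ_SKBHASH set, the
 * flow hash spreads packets over the configured queue range (mapping_mod
 * entries starting at queue_mapping); the result is capped to the number
 * of TX queues the device actually has.
 */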
static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
			    struct sk_buff *skb)
{
	u16 queue_mapping = params->queue_mapping;

	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		u32 hash = skb_get_hash(skb);

		queue_mapping += hash % params->mapping_mod;
	}

	return netdev_cap_txqueue(skb->dev, queue_mapping);
}

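/* Per-packet action handler, called from the softirq datapath under RCU.
 * Applies the configured edits to the skb; only a failed header pull on
 * the dsfield-inherit path drops the packet (TC_ACT_SHOT).
 */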
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
			   struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	int action;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	params = rcu_dereference_bh(d->params);
	action = READ_ONCE(d->tcf_action);

	if (params->flags & SKBEDIT_F_PRIORITY)
		skb->priority = params->priority;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
			break;
		}
	}
	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > params->queue_mapping) {
#ifdef CONFIG_NET_EGRESS
		netdev_xmit_skip_txqueue(true);
#endif
		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
	}
	if (params->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~params->mask;
		skb->mark |= params->mark & params->mask;
	}
	if (params->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = params->ptype;
	return action;

err:
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}

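/* Fold hardware-offload counters reported by drivers into the action's
 * software stats and advance the last-use timestamp monotonically.
 */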
static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
				     u64 packets, u64 drops,
				     u64 lastuse, bool hw)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_t *tm = &d->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

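/* Netlink attribute policy: all attributes are fixed-size; the presence of
 * each optional attribute is what enables the corresponding SKBEDIT_F_* edit.
 */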
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
	[TCA_SKBEDIT_QUEUE_MAPPING_MAX]	= { .len = sizeof(u16) },
};

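/* Control path: parse and validate netlink attributes, create or look up
 * the action instance, then publish a new params block via RCU so the
 * datapath never sees a half-updated configuration.
 */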
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    struct tcf_proto *tp, u32 act_flags,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
	struct tcf_skbedit_params *params_new;
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	u16 mapping_mod = 1;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
					  skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		if (is_tcf_skbedit_ingress(act_flags) &&
		    !(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
			NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw");
			return -EOPNOTSUPP;
		}
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
			u16 *queue_mapping_max;

			if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
			    !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
				NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
				return -EINVAL;
			}

			queue_mapping_max =
				nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
			if (*queue_mapping_max < *queue_mapping) {
				NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
				return -EINVAL;
			}

			mapping_mod = *queue_mapping_max - *queue_mapping + 1;
			flags |= SKBEDIT_F_TXQ_SKBHASH;
		}
		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbedit_ops, bind, true, act_flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	params_new->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		params_new->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING) {
		params_new->queue_mapping = *queue_mapping;
		params_new->mapping_mod = mapping_mod;
	}
	if (flags & SKBEDIT_F_MARK)
		params_new->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		params_new->ptype = *ptype;
	/* default behaviour is to use all the bits */
	params_new->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		params_new->mask = *mask;

	spin_lock_bh(&d->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(d->params, params_new,
					 lockdep_is_held(&d->tcf_lock));
	spin_unlock_bh(&d->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

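/* Dump the action configuration back to user space. Runs under tcf_lock so
 * the params pointer cannot be replaced while the attributes are emitted;
 * on failure the partially written attributes are trimmed off the skb.
 */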
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
	};
	u64 pure_flags = 0;
	struct tcf_t t;

	spin_lock_bh(&d->tcf_lock);
	params = rcu_dereference_protected(d->params,
					   lockdep_is_held(&d->tcf_lock));
	opt.action = d->tcf_action;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
		goto nla_put_failure;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
				params->queue_mapping + params->mapping_mod - 1))
			goto nla_put_failure;

		pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
	}
	if (pure_flags != 0 &&
	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&d->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

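/* Teardown: free the params block after the current RCU grace period, so
 * in-flight datapath readers finish before the memory goes away.
 */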
static void tcf_skbedit_cleanup(struct tc_action *a)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;

	params = rcu_dereference_protected(d->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_skbedit))
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
		+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}

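/* Translate the action into a flow_action entry for hardware offload. On
 * the bind pass the entry is filled in and the index advanced; otherwise
 * only the action id is reported so drivers can declare support.
 */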
static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
					 u32 *index_inc, bool bind,
					 struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_skbedit_tx_queue_mapping(act)) {
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side");
			return -EOPNOTSUPP;
		} else if (is_tcf_skbedit_rx_queue_mapping(act)) {
			entry->id = FLOW_ACTION_RX_QUEUE_MAPPING;
			entry->rx_queue = tcf_skbedit_rx_queue_mapping(act);
		} else if (is_tcf_skbedit_inheritdsfield(act)) {
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
			return -EOPNOTSUPP;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_skbedit_mark(act))
			fl_action->id = FLOW_ACTION_MARK;
		else if (is_tcf_skbedit_ptype(act))
			fl_action->id = FLOW_ACTION_PTYPE;
		else if (is_tcf_skbedit_priority(act))
			fl_action->id = FLOW_ACTION_PRIORITY;
		else if (is_tcf_skbedit_rx_queue_mapping(act))
			fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		= "skbedit",
	.id		= TCA_ID_SKBEDIT,
	.owner		= THIS_MODULE,
	.act		= tcf_skbedit_act,
	.stats_update	= tcf_skbedit_stats_update,
	.dump		= tcf_skbedit_dump,
	.init		= tcf_skbedit_init,
	.cleanup	= tcf_skbedit_cleanup,
	.get_fill_size	= tcf_skbedit_get_fill_size,
	.offload_act_setup = tcf_skbedit_offload_act_setup,
	.size		= sizeof(struct tcf_skbedit),
};

static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);

	return tc_action_net_init(net, tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_skbedit_ops.net_id);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &act_skbedit_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);