/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */
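/*
 * Illustrative usage (iproute2 "flow" filter syntax from memory; the
 * device, handles and numbers below are arbitrary examples):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 perturb 10 baseclass 1:1
 *
 * This hashes each packet's addresses, protocol and ports with jhash2(),
 * reseeds the hash key every 10 seconds and maps the result onto minor
 * class ids starting at baseclass 1:1, modulo the divisor.
 */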
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
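/* One flow_head hangs off each tcf_proto instance and anchors an RCU list
 * of flow_filter entries.  Each filter caches its netlink-configured
 * parameters: the key mask, hash/map mode, the mask/xor/rshift/addend
 * transform, divisor, baseclass and the perturbation timer state.
 */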
struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;
	u32			nkeys, keymask, mode;
	u32			mask, xor, rshift, addend;
	u32			divisor, baseclass, hashrnd;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}
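/* CTTUPLE() evaluates to the given member of the conntrack tuple attached
 * to @skb.  When conntrack is disabled or no entry is attached, it jumps
 * to a local "fallback" label, so every caller must provide one.
 */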
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
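/* Classification walks the filter list under RCU.  For each filter the
 * configured keys are extracted (dissecting the packet only when one of
 * FLOW_KEYS_NEEDED is requested), then either hashed with jhash2() or, in
 * map mode, transformed with mask/xor/rshift/addend.  The result is
 * reduced modulo the divisor and folded into the baseclass to form the
 * class id returned in res->classid.
 */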
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
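/* Deferrable timer callback: periodically reseed the jhash key so that
 * long-lived flows get redistributed across classes, and rearm while a
 * perturbation period is still configured.
 */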
static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}
static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
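/* Filter teardown happens in two deferred steps: call_rcu() waits for
 * readers to finish, then the RCU callback punts the real work to a
 * workqueue, since del_timer_sync() and tcf_exts_destroy() may sleep and
 * are run here under the RTNL lock.
 */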
static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(work, struct flow_filter, work);

	rtnl_lock();
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	kfree(f);
	rtnl_unlock();
}
static void flow_destroy_filter(struct rcu_head *head)
{
	struct flow_filter *f = container_of(head, struct flow_filter, rcu);

	INIT_WORK(&f->work, flow_destroy_filter_work);
	tcf_queue_work(&f->work);
}
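/* Create or replace a filter: validate the netlink attributes, build a
 * new flow_filter (copying parameters from the old one on replace), then
 * swap it into the filter list under RCU and defer destruction of the
 * replaced entry.
 */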
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
			       (unsigned long)fnew);

	netif_keep_dst(qdisc_dev(tp->q));

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold)
		call_rcu(&fold->rcu, flow_destroy_filter);
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	call_rcu(&f->rcu, flow_destroy_filter);
	*last = list_empty(&head->filters);
	return 0;
}
static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}
static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, flow_destroy_filter);
	}
	kfree_rcu(head, rcu);
}
static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};
static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}
module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");