/*
 *		inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
        union {
                struct inet_skb_parm    h4;
                struct inet6_skb_parm   h6;
        };
        struct sk_buff          *next_frag;
        int                     frag_run_len;
};

#define FRAG_CB(skb)            ((struct ipfrag_skb_cb *)((skb)->cb))
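
/* Example: three back-to-back fragments received in order (offsets 0, 1200
 * and 2400, 1200 bytes of payload each) form a single run: one rb-tree node
 * whose head has frag_run_len == 3600, with the later skbs chained through
 * next_frag.
 */
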
static void fragcb_clear(struct sk_buff *skb)
{
        RB_CLEAR_NODE(&skb->rbnode);
        FRAG_CB(skb)->next_frag = NULL;
        FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
                                   struct sk_buff *skb)
{
        fragcb_clear(skb);

        FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
        FRAG_CB(q->fragments_tail)->next_frag = skb;
        q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
        fragcb_clear(skb);

        if (q->last_run_head)
                rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
                             &q->last_run_head->rbnode.rb_right);
        else
                rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
        rb_insert_color(&skb->rbnode, &q->rb_fragments);

        q->fragments_tail = skb;
        q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

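/* Register a fragment queue "family" (ipv4, ipv6, nf conntrack reassembly):
 * create the slab cache its queues are allocated from and set up the
 * refcount/completion that inet_frags_fini() uses to wait for the last user.
 */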
int inet_frags_init(struct inet_frags *f)
{
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        refcount_set(&f->refcnt, 1);
        init_completion(&f->completion);
        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

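/* Unregister a family: drop the initial reference, wait for the last fqdir
 * using it to go away, then destroy the queue slab cache.
 */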
void inet_frags_fini(struct inet_frags *f)
{
        if (refcount_dec_and_test(&f->refcnt))
                complete(&f->completion);

        wait_for_completion(&f->completion);

        kmem_cache_destroy(f->frags_cachep);
        f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
        struct inet_frag_queue *fq = ptr;
        int count;

        count = del_timer_sync(&fq->timer) ? 1 : 0;

        spin_lock_bh(&fq->lock);
        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq->flags |= INET_FRAG_COMPLETE;
                count++;
        } else if (fq->flags & INET_FRAG_HASH_DEAD) {
                count++;
        }
        spin_unlock_bh(&fq->lock);

        if (refcount_sub_and_test(count, &fq->refcnt))
                inet_frag_destroy(fq);
}

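/* Deferred fqdir teardown, run from the RCU work queued by fqdir_exit():
 * free every queue left in the hash table, wait for in-flight
 * inet_frag_destroy_rcu() callbacks, then release the fqdir itself.
 */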
static void fqdir_rwork_fn(struct work_struct *work)
{
        struct fqdir *fqdir = container_of(to_rcu_work(work),
                                           struct fqdir, destroy_rwork);
        struct inet_frags *f = fqdir->f;

        rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

        /* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
         * have completed, since they need to dereference fqdir.
         * Would it not be nice to have kfree_rcu_barrier() ? :)
         */
        rcu_barrier();

        if (refcount_dec_and_test(&f->refcnt))
                complete(&f->completion);

        kfree(fqdir);
}

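/* Allocate the per-namespace fragment directory (the rhashtable of queues)
 * for one family and take a reference on that family.
 */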
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
        struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
        int res;

        if (!fqdir)
                return -ENOMEM;
        fqdir->f = f;
        fqdir->net = net;
        res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
        if (res < 0) {
                kfree(fqdir);
                return res;
        }
        refcount_inc(&f->refcnt);
        *fqdirp = fqdir;
        return 0;
}
EXPORT_SYMBOL(fqdir_init);

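/* Begin dismantling a fqdir: forbid creation of new queues and hand the
 * actual hash table destruction to fqdir_rwork_fn() via RCU-delayed work.
 */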
void fqdir_exit(struct fqdir *fqdir)
{
        fqdir->high_thresh = 0; /* prevent creation of new frags */

        fqdir->dead = true;

        /* call_rcu is supposed to provide memory barrier semantics,
         * separating the setting of fqdir->dead with the destruction
         * work.  This implicit barrier is paired with inet_frag_kill().
         */

        INIT_RCU_WORK(&fqdir->destroy_rwork, fqdir_rwork_fn);
        queue_rcu_work(system_wq, &fqdir->destroy_rwork);
}
EXPORT_SYMBOL(fqdir_exit);

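/* Mark a queue as complete/dead: stop its timer and drop the hash table
 * reference, or set INET_FRAG_HASH_DEAD if the table is already going away.
 */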
void inet_frag_kill(struct inet_frag_queue *fq)
{
        if (del_timer(&fq->timer))
                refcount_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                struct fqdir *fqdir = fq->fqdir;

                fq->flags |= INET_FRAG_COMPLETE;
                rcu_read_lock();
                /* The RCU read lock provides a memory barrier
                 * guaranteeing that if fqdir->dead is false then
                 * the hash table destruction will not start until
                 * after we unlock. Paired with inet_frags_exit_net().
                 */
                if (!fqdir->dead) {
                        rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
                                               fqdir->f->rhash_params);
                        refcount_dec(&fq->refcnt);
                } else {
                        fq->flags |= INET_FRAG_HASH_DEAD;
                }
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(inet_frag_kill);

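/* RCU callback for inet_frag_destroy(): run the per-family destructor and
 * return the queue to its slab cache.
 */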
static void inet_frag_destroy_rcu(struct rcu_head *head)
{
        struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
                                                 rcu);
        struct inet_frags *f = q->fqdir->f;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);
}

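/* Free every skb in the rb-tree, following each run's next_frag chain, and
 * return the total truesize released.
 */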
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
        struct rb_node *p = rb_first(root);
        unsigned int sum = 0;

        while (p) {
                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

                p = rb_next(p);
                rb_erase(&skb->rbnode, root);
                while (skb) {
                        struct sk_buff *next = FRAG_CB(skb)->next_frag;

                        sum += skb->truesize;
                        kfree_skb(skb);
                        skb = next;
                }
        }
        return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

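/* Final teardown of a dead queue: purge its fragments, give the memory back
 * to the accounting and free the queue after an RCU grace period.
 */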
void inet_frag_destroy(struct inet_frag_queue *q)
{
        struct fqdir *fqdir;
        unsigned int sum, sum_truesize = 0;
        struct inet_frags *f;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fqdir = q->fqdir;
        f = fqdir->f;
        sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
        sum = sum_truesize + f->qsize;

        call_rcu(&q->rcu, inet_frag_destroy_rcu);

        sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

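/* Allocate a queue for a new datagram; the family constructor fills in the
 * lookup key before the queue is made visible in the hash table.
 */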
static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->fqdir = fqdir;
        f->constructor(q, arg);
        add_frag_mem_limit(fqdir, f->qsize);

        timer_setup(&q->timer, f->frag_expire, 0);
        spin_lock_init(&q->lock);
        /* One reference for the hash table, one for the timer, one for the caller. */
        refcount_set(&q->refcnt, 3);

        return q;
}

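/* Allocate a queue and publish it in the hash table.  If an identical queue
 * was inserted concurrently (or the insert fails), *prev reports it and the
 * new queue is torn down; on allocation failure *prev is ERR_PTR(-ENOMEM).
 */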
static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
                                                void *arg,
                                                struct inet_frag_queue **prev)
{
        struct inet_frags *f = fqdir->f;
        struct inet_frag_queue *q;

        q = inet_frag_alloc(fqdir, f, arg);
        if (!q) {
                *prev = ERR_PTR(-ENOMEM);
                return NULL;
        }
        mod_timer(&q->timer, jiffies + fqdir->timeout);

        *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
                                                 &q->node, f->rhash_params);
        if (*prev) {
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
                return NULL;
        }
        return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
        struct inet_frag_queue *fq = NULL, *prev;

        if (!fqdir->high_thresh || frag_mem_limit(fqdir) > fqdir->high_thresh)
                return NULL;

        rcu_read_lock();

        prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
        if (!prev)
                fq = inet_frag_create(fqdir, key, &prev);
        if (prev && !IS_ERR(prev)) {
                fq = prev;
                if (!refcount_inc_not_zero(&fq->refcnt))
                        fq = NULL;
        }
        rcu_read_unlock();
        return fq;
}
EXPORT_SYMBOL(inet_frag_find);

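/* Insert one fragment into the queue's rb-tree of runs.  Returns IPFRAG_OK
 * on success, IPFRAG_DUP for an exact duplicate (just drop the skb), or
 * IPFRAG_OVERLAP when the whole datagram must be discarded.
 */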
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
                           int offset, int end)
{
        struct sk_buff *last = q->fragments_tail;

        /* RFC5722, Section 4, amended by Errata ID : 3089
         *                          When reassembling an IPv6 datagram, if
         *   one or more of its constituent fragments is determined to be an
         *   overlapping fragment, the entire datagram (and any constituent
         *   fragments) MUST be silently discarded.
         *
         * Duplicates, however, should be ignored (i.e. skb dropped, but the
         * queue/fragments kept for later reassembly).
         */
        if (!last)
                fragrun_create(q, skb);  /* First fragment. */
        else if (last->ip_defrag_offset + last->len < end) {
                /* This is the common case: skb goes to the end. */
                /* Detect and discard overlaps. */
                if (offset < last->ip_defrag_offset + last->len)
                        return IPFRAG_OVERLAP;
                if (offset == last->ip_defrag_offset + last->len)
                        fragrun_append_to_last(q, skb);
                else
                        fragrun_create(q, skb);
        } else {
                /* Binary search. Note that skb can become the first fragment,
                 * but not the last (covered above).
                 */
                struct rb_node **rbn, *parent;

                rbn = &q->rb_fragments.rb_node;
                do {
                        struct sk_buff *curr;
                        int curr_run_end;

                        parent = *rbn;
                        curr = rb_to_skb(parent);
                        curr_run_end = curr->ip_defrag_offset +
                                        FRAG_CB(curr)->frag_run_len;
                        if (end <= curr->ip_defrag_offset)
                                rbn = &parent->rb_left;
                        else if (offset >= curr_run_end)
                                rbn = &parent->rb_right;
                        else if (offset >= curr->ip_defrag_offset &&
                                 end <= curr_run_end)
                                return IPFRAG_DUP;
                        else
                                return IPFRAG_OVERLAP;
                } while (*rbn);
                /* Here we have parent properly set, and rbn pointing to
                 * one of its NULL left/right children. Insert skb.
                 */
                fragcb_clear(skb);
                rb_link_node(&skb->rbnode, parent, rbn);
                rb_insert_color(&skb->rbnode, &q->rb_fragments);
        }

        skb->ip_defrag_offset = offset;

        return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

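/* Turn @skb into the queue head (morphing it onto the current first
 * fragment if needed), unclone it and split off any existing frag_list,
 * so that the remaining fragments can be chained behind it.
 * Returns the reasm_data cursor for inet_frag_reasm_finish(), or NULL.
 */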
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
                              struct sk_buff *parent)
{
        struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
        struct sk_buff **nextp;
        int delta;

        if (head != skb) {
                fp = skb_clone(skb, GFP_ATOMIC);
                if (!fp)
                        return NULL;
                FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
                if (RB_EMPTY_NODE(&skb->rbnode))
                        FRAG_CB(parent)->next_frag = fp;
                else
                        rb_replace_node(&skb->rbnode, &fp->rbnode,
                                        &q->rb_fragments);
                if (q->fragments_tail == skb)
                        q->fragments_tail = fp;
                skb_morph(skb, head);
                FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
                consume_skb(head);
                head = skb;
        }
        WARN_ON(head->ip_defrag_offset != 0);

        delta = -head->truesize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                return NULL;

        delta += head->truesize;
        if (delta)
                add_frag_mem_limit(q->fqdir, delta);

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        return NULL;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->data_len = head->data_len - plen;
                clone->len = clone->data_len;
                head->truesize += clone->truesize;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(q->fqdir, clone->truesize);
                skb_shinfo(head)->frag_list = clone;
                nextp = &clone->next;
        } else {
                nextp = &skb_shinfo(head)->frag_list;
        }

        return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

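/* Walk the rb-tree in order and link every remaining fragment onto the
 * head's frag_list, updating len, data_len, truesize and checksum along
 * the way.
 */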
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
                            void *reasm_data)
{
        struct sk_buff **nextp = (struct sk_buff **)reasm_data;
        struct rb_node *rbn;
        struct sk_buff *fp;

        skb_push(head, head->data - skb_network_header(head));

        /* Traverse the tree in order, to build frag_list. */
        fp = FRAG_CB(head)->next_frag;
        rbn = rb_next(&head->rbnode);
        rb_erase(&head->rbnode, &q->rb_fragments);
        while (rbn || fp) {
                /* fp points to the next sk_buff in the current run;
                 * rbn points to the next run.
                 */
                /* Go through the current run. */
                while (fp) {
                        *nextp = fp;
                        nextp = &fp->next;
                        fp->prev = NULL;
                        memset(&fp->rbnode, 0, sizeof(fp->rbnode));
                        fp->sk = NULL;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        if (head->ip_summed != fp->ip_summed)
                                head->ip_summed = CHECKSUM_NONE;
                        else if (head->ip_summed == CHECKSUM_COMPLETE)
                                head->csum = csum_add(head->csum, fp->csum);
                        head->truesize += fp->truesize;
                        fp = FRAG_CB(fp)->next_frag;
                }
                /* Move to the next run. */
                if (rbn) {
                        struct rb_node *rbnext = rb_next(rbn);

                        fp = rb_to_skb(rbn);
                        rb_erase(rbn, &q->rb_fragments);
                        rbn = rbnext;
                }
        }
        sub_frag_mem_limit(q->fqdir, head->truesize);

        *nextp = NULL;
        skb_mark_not_on_list(head);
        head->prev = NULL;
        head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

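/* Detach the first fragment from the queue and return it (or NULL if the
 * queue is empty), updating the tree, tail pointer and memory accounting.
 */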
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
        struct sk_buff *head, *skb;

        head = skb_rb_first(&q->rb_fragments);
        if (!head)
                return NULL;
        skb = FRAG_CB(head)->next_frag;
        if (skb)
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
        else
                rb_erase(&head->rbnode, &q->rb_fragments);
        memset(&head->rbnode, 0, sizeof(head->rbnode));

        if (head == q->fragments_tail)
                q->fragments_tail = NULL;

        sub_frag_mem_limit(q->fqdir, head->truesize);

        return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);