// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "../core/sock_destructor.h"

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
	int			ip_defrag_offset;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

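/* Reset an skb so it can start (or join) a run: clear its rb-tree linkage and
 * make it a one-skb run whose frag_run_len is its own length.
 */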
static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

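/* inet_frags_fini() pairs with inet_frags_init(): the base reference taken
 * above (plus one per fqdir, dropped in fqdir_free_fn()) must reach zero and
 * fire the completion before the kmem cache may be destroyed.
 */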
void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}

static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{
	struct llist_node *kill_list;
	struct fqdir *fqdir, *tmp;
	struct inet_frags *f;

	/* Atomically snapshot the list of fqdirs to free */
	kill_list = llist_del_all(&fqdir_free_list);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
		f = fqdir->f;
		if (refcount_dec_and_test(&f->refcnt))
			complete(&f->completion);

		kfree(fqdir);
	}
}

static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);

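/* Runs on inet_frag_wq: evict every queue still hashed for this fqdir, then
 * batch the fqdir onto fqdir_free_list so fqdir_free_fn() can wait for RCU
 * callbacks and release it.
 */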
static void fqdir_work_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	if (llist_add(&fqdir->free_list, &fqdir_free_list))
		queue_work(system_wq, &fqdir_free_work);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);

static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{
	inet_frag_wq = create_workqueue("inet_frag_wq");
	if (!inet_frag_wq)
		panic("Could not create inet frag workq");
	return 0;
}

pure_initcall(inet_frag_wq_init);

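/* Per-netns dismantle entry point: tearing down the rhashtable can sleep and
 * may take a while, so it is deferred to the dedicated workqueue instead of
 * being run in the caller's context.
 */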
void fqdir_exit(struct fqdir *fqdir)
{
	INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
	queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock. Paired with fqdir_pre_exit().
		 */
		if (!READ_ONCE(fqdir->dead)) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			refcount_dec(&fq->refcnt);
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct fqdir *fqdir;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

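/* Allocate and set up a fresh queue. The initial refcount of 3 accounts for
 * the hash table entry, the pending timer, and the reference handed back to
 * the caller of inet_frag_find().
 */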
static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 3);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
	long high_thresh = READ_ONCE(fqdir->high_thresh);
	struct inet_frag_queue *fq = NULL, *prev;

	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
		return NULL;

	rcu_read_lock();

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (!IS_ERR_OR_NULL(prev)) {
		fq = prev;
		if (!refcount_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);

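/* Typical caller flow, sketched here for orientation (not lifted verbatim from
 * any one protocol; ipv4/ip_fragment.c and ipv6/reassembly.c both follow it
 * with their own bookkeeping in between):
 *
 *	q = inet_frag_find(fqdir, &key);
 *	if (!q)
 *		return -ENOMEM;
 *	spin_lock(&q->lock);
 *	err = inet_frag_queue_insert(q, skb, offset, end);
 *	...
 *	spin_unlock(&q->lock);
 *	inet_frag_put(q);
 */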
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *   When reassembling an IPv6 datagram, if
	 *   one or more of its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= FRAG_CB(curr)->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	FRAG_CB(skb)->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

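/* Prepare the queue for reassembly around @skb: make it the first fragment
 * (morphing it over the current head if needed), ensure the head is unshared,
 * and return the location where the remaining fragments are to be linked
 * (the reasm_data later passed to inet_frag_reasm_finish()), or NULL on
 * failure.
 */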
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	void (*destructor)(struct sk_buff *);
	unsigned int orig_truesize = 0;
	struct sk_buff **nextp = NULL;
	struct sock *sk = skb->sk;
	int delta;

	if (sk && is_skb_wmem(skb)) {
		/* TX: skb->sk might have been passed as argument to
		 * dst->output and must remain valid until tx completes.
		 *
		 * Move sk to reassembled skb and fix up wmem accounting.
		 */
		orig_truesize = skb->truesize;
		destructor = skb->destructor;
	}

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp) {
			head = skb;
			goto out_restore_sk;
		}
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;

		if (orig_truesize) {
			/* prevent skb_morph from releasing sk */
			skb->sk = NULL;
			skb->destructor = NULL;
		}
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_restore_sk;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_restore_sk;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

out_restore_sk:
	if (orig_truesize) {
		int ts_delta = head->truesize - orig_truesize;

		/* if this reassembled skb is fragmented later,
		 * fraglist skbs will get skb->sk assigned from head->sk,
		 * and each frag skb will be released via sock_wfree.
		 *
		 * Update sk_wmem_alloc.
		 */
		head->sk = sk;
		head->destructor = destructor;
		refcount_add(ts_delta, &sk->sk_wmem_alloc);
	}
	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

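/* Walk the rb-tree of runs in order, appending (or coalescing) every remaining
 * fragment onto the head's frag_list via reasm_data, and fix up the length,
 * truesize and checksum of the reassembled packet.
 */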
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{
	struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
	const unsigned int head_truesize = head->truesize;
	struct sk_buff **nextp = reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;
	int sum_truesize;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);

	sum_truesize = head->truesize;
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
			bool stolen;
			int delta;

			sum_truesize += fp->truesize;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);

			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
							     &delta)) {
				kfree_skb_partial(fp, stolen);
			} else {
				fp->prev = NULL;
				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
				fp->sk = NULL;

				head->data_len += fp->len;
				head->len += fp->len;
				head->truesize += fp->truesize;

				*nextp = fp;
				nextp = &fp->next;
			}

			fp = next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, sum_truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
	head->mono_delivery_time = q->mono_delivery_time;

	if (sk)
		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

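/* Detach and return the lowest-offset fragment, leaving the rest of the queue
 * intact; used e.g. by the expiration paths so an ICMP error can be built
 * from the first fragment. The caller owns the returned skb.
 */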
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head, *skb;

	head = skb_rb_first(&q->rb_fragments);
	if (!head)
		return NULL;
	skb = FRAG_CB(head)->next_frag;
	if (skb)
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
	else
		rb_erase(&head->rbnode, &q->rb_fragments);
	memset(&head->rbnode, 0, sizeof(head->rbnode));
	barrier();

	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->fqdir, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);