// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>

#include <net/ipv6_frag.h>

#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netns/generic.h>

static const char nf_frags_cache_name[] = "nf-frags";

static unsigned int nf_frag_pernet_id __read_mostly;
static struct inet_frags nf_frags;

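/* Fetch this netns' private defrag state from the net_generic() array. */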
static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
{
	return net_generic(net, nf_frag_pernet_id);
}

#ifdef CONFIG_SYSCTL

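/* Per-netns tunables; the .data pointers are filled in per netns at
 * registration time so each netns adjusts its own fqdir limits.
 */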
static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};

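/* Register the frag6 sysctls for @net. Non-init netns get their own copy
 * of the table so the .data pointers can target that netns' fqdir; the
 * extra1/extra2 cross-links clamp low_thresh to never exceed high_thresh.
 */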
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag;
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;
	}

	nf_frag = nf_frag_pernet(net);

	table[0].data	= &nf_frag->fqdir->timeout;
	table[1].data	= &nf_frag->fqdir->low_thresh;
	table[1].extra2	= &nf_frag->fqdir->high_thresh;
	table[2].data	= &nf_frag->fqdir->high_thresh;
	table[2].extra1	= &nf_frag->fqdir->low_thresh;

	hdr = register_net_sysctl_sz(net, "net/netfilter", table,
				     ARRAY_SIZE(nf_ct_frag6_sysctl_table));
	if (hdr == NULL)
		goto err_reg;

	nf_frag->nf_frag_frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

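/* Remove the sysctls; tables duplicated for non-init netns are freed here. */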
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	const struct ctl_table *table;

	table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

#else
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
#endif

static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev);

static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

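/* Per-queue timer: the reassembly window for this queue elapsed, so hand
 * it to the shared ip6frag helper, which evicts the queue and may answer
 * with an ICMPv6 time-exceeded message.
 */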
static void nf_ct_frag6_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

/* Creation primitives. */
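/* Look up, or create, the queue matching this fragment's reassembly key.
 * The inbound interface is part of the key only for link-local and
 * multicast destinations, where the address alone is ambiguous.
 */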
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
				  const struct ipv6hdr *hdr, int iif)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					    IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(nf_frag->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

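/* Validate one fragment and add it to the reassembly queue. Returns 0
 * when this fragment completed the datagram (now rebuilt in @skb),
 * -EINPROGRESS while the queue keeps the skb, -EPROTO for a misaligned
 * non-final fragment, or -EINVAL for anything malformed.
 */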
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
{
	unsigned int payload_len;
	struct net_device *dev;
	struct sk_buff *prev;
	int offset, end, err;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -EINVAL;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			inet_frag_kill(&fq->q);
			return -EPROTO;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Note: skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure the compiler won't do silly aliasing games */
	barrier();

	prev = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err) {
		if (err == IPFRAG_DUP) {
			/* No error for duplicates, pretend they got queued. */
			kfree_skb_reason(skb, SKB_DROP_REASON_DUP_FRAG);
			return -EINPROGRESS;
		}
		goto insert_error;
	}

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.tstamp_type = skb->tstamp_type;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
		skb->_skb_refdst = orefdst;

		/* After queue has assumed skb ownership, only 0 or
		 * -EINPROGRESS must be returned.
		 */
		return err ? -EINPROGRESS : 0;
	}

	skb_dst_drop(skb);
	skb_orphan(skb);
	return -EINPROGRESS;

insert_error:
	inet_frag_kill(&fq->q);
err:
	skb_dst_drop(skb);
	return -EINVAL;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev)
{
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto err;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto err;

	payload_len = -skb_network_offset(skb) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr);
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		goto err;
	}

	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate the ICV correctly. */
	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->ignore_df = 1;
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_partial(skb_network_header(skb),
					 skb_network_header_len(skb),
					 skb->csum);

	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 0;

err:
	inet_frag_kill(&fq->q);
	return -EINVAL;
}

/*
 * Find the header just before the Fragment Header.
 *
 * On success, returns 0 and sets:
 * (*prevhdrp): the value of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*prevhoff): the offset of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*fhoff)   : the offset of the Fragment Header.
 *
 * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr))
			return -1;
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = ipv6_authlen(&hdr);
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

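/**
 * nf_ct_frag6_gather - queue an IPv6 fragment for conntrack defragmentation
 * @net: netns the packet belongs to
 * @skb: packet to examine
 * @user: defrag user identity, part of the reassembly queue key
 *
 * Returns 0 if @skb is not a fragment or completed reassembly (it then
 * carries the whole datagram), -EINPROGRESS if a reassembly queue took
 * ownership of it, or a negative error for malformed or unhandled input.
 */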
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	u16 savethdr = skb->transport_header;
	u8 nexthdr = NEXTHDR_FRAGMENT;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return 0;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return 0;

	/* Discard the first fragment if it does not include all headers
	 * RFC 8200, Section 4.5
	 */
	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
		pr_debug("Drop incomplete fragment\n");
		return 0;
	}

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	fq = fq_find(net, fhdr->identification, user, hdr,
		     skb->dev ? skb->dev->ifindex : 0);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		return -ENOMEM;
	}

	spin_lock_bh(&fq->q.lock);

	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
	if (ret == -EPROTO) {
		skb->transport_header = savethdr;
		ret = 0;
	}

	spin_unlock_bh(&fq->q.lock);
	inet_frag_put(&fq->q);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

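/* Per-netns setup: allocate the fqdir, apply the default limits, then
 * expose them via sysctl.
 */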
static int nf_ct_net_init(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	int res;

	res = fqdir_init(&nf_frag->fqdir, &nf_frags, net);
	if (res < 0)
		return res;

	nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = nf_ct_frag6_sysctl_register(net);
	if (res < 0)
		fqdir_exit(nf_frag->fqdir);
	return res;
}

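/* Two-phase netns teardown: pre_exit marks the fqdir dead so no new
 * queues are created; exit then unregisters the sysctls and releases
 * the fqdir.
 */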
static void nf_ct_net_pre_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	fqdir_pre_exit(nf_frag->fqdir);
}

static void nf_ct_net_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	nf_ct_frags6_sysctl_unregister(net);
	fqdir_exit(nf_frag->fqdir);
}

static struct pernet_operations nf_ct_net_ops = {
	.init		= nf_ct_net_init,
	.pre_exit	= nf_ct_net_pre_exit,
	.exit		= nf_ct_net_exit,
	.id		= &nf_frag_pernet_id,
	.size		= sizeof(struct nft_ct_frag6_pernet),
};

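/* Queue lookup uses the shared ip6frag hash/compare helpers keyed on
 * struct frag_v6_compare_key.
 */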
static const struct rhashtable_params nfct_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

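/* Module init: fill in the inet_frags descriptor shared by all netns
 * and register the per-netns operations.
 */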
int nf_ct_frag6_init(void)
{
	int ret = 0;

	nf_frags.constructor = ip6frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.frags_cache_name = nf_frags_cache_name;
	nf_frags.rhash_params = nfct_rhash_params;
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

void nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}