// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ipv6_frag.h>
#include <net/inet_ecn.h>

static const char ip6_frag_cache_name[] = "ip6-frags";

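/* Map the packet's ECN codepoint to a single bit; these bits are OR-ed into
 * fq->ecn and folded back through ip_frag_ecn_table[] at reassembly time.
 */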
static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

static struct inet_frags ip6_frags;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev);

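/* Timer callback: the queue timed out before the datagram was complete.
 * ip6frag_expire_frag_queue() drops the queue and may send an ICMPv6
 * "fragment reassembly time exceeded" error to the source.
 */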
static void ip6_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

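/* Look up (or create) the reassembly queue for this fragment.  The incoming
 * interface is part of the lookup key only for multicast and link-local
 * destinations.
 */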
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
{
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = IP6_DEFRAG_LOCAL_DELIVER,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					    IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(net->ipv6.fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

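/* Queue one fragment.  Returns -EINPROGRESS while the datagram is incomplete,
 * the result of ip6_frag_reasm() once all fragments have arrived, or a
 * negative error.  When *prob_offset is set, the skb is not freed here;
 * the caller hands it to icmpv6_param_prob().
 */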
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff,
			  u32 *prob_offset)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	int offset, end, fragsize;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	int err = -ENOENT;
	SKB_DR(reason);
	u8 ecn;

	/* If reassembly is already done, @skb must be a duplicate frag. */
	if (fq->q.flags & INET_FRAG_COMPLETE) {
		SKB_DR_SET(reason, DUP_FRAG);
		goto err;
	}

	err = -EINVAL;
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
		/* Note that when prob_offset is set, the skb is freed by the
		 * caller; we do not free it here.
		 */
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto discard_fq;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			*prob_offset = offsetof(struct ipv6hdr, payload_len);
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto discard_fq;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto discard_fq;

	err = -ENOMEM;
	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto discard_fq;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_fq;

	/* Note: skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Make sure the compiler won't do silly aliasing games. */
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.tstamp_type = skb->tstamp_type;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	fragsize = -skb_network_offset(skb) + skb->len;
	if (fragsize > fq->q.max_size)
		fq->q.max_size = fragsize;

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		SKB_DR_SET(reason, DUP_FRAG);
		err = -EINVAL;
		goto err;
	}
	err = -EINVAL;
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
	inet_frag_kill(&fq->q);
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb_reason(skb, reason);
	return err;
}

/*
 *	Reassemble the completed datagram.
 *
 *	It is called with the fq locked, and the caller must have checked that
 *	the queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 *	first and the last fragments have arrived, and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = fq->q.fqdir->net;
	unsigned int nhoff;
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto out_oom;

	payload_len = -skb_network_offset(skb) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr);
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* We have to remove the fragment header from the datagram and
	 * relocate the header in order to calculate the ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	if (skb_mac_header_was_set(skb))
		skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);

	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->nhoff = nhoff;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
	IP6CB(skb)->frag_max_size = fq->q.max_size;

	/* Yes, and fold redundant checksum back. 8) */
	skb_postpush_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));

	rcu_read_lock();
	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	inet_frag_kill(&fq->q);
	return -1;
}

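/* Protocol handler for the IPv6 Fragment extension header.  "Atomic"
 * fragments (offset 0, MF clear) are passed straight through; real fragments
 * are queued, and once the datagram is complete the reassembled skb, with the
 * fragment header removed, continues up the receive path.
 */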
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	u8 nexthdr;
	int iif;

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		__IP6_INC_STATS(net,
				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
					    sizeof(struct ipv6hdr);
		return 1;
	}

	/* RFC 8200, Section 4.5 Fragment Header:
	 * If the first fragment does not include all headers through an
	 * Upper-Layer header, then that fragment should be discarded and
	 * an ICMP Parameter Problem, Code 3, message should be sent to
	 * the source of the fragment, with the Pointer field set to zero.
	 */
	nexthdr = hdr->nexthdr;
	if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
		return -1;
	}

	iif = skb->dev ? skb->dev->ifindex : 0;
	fq = fq_find(net, fhdr->identification, hdr, iif);
	if (fq) {
		u32 prob_offset = 0;
		int ret;

		spin_lock(&fq->q.lock);

		fq->iif = iif;
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
				     &prob_offset);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q);
		if (prob_offset) {
			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
					IPSTATS_MIB_INHDRERRORS);
			/* icmpv6_param_prob() calls kfree_skb(skb) */
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
		}
		return ret;
	}

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
			IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

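/* Registered for IPPROTO_FRAGMENT from ipv6_frag_init(). */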
static const struct inet6_protocol frag_protocol = {
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL

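/* Per-netns sysctls.  The .data pointers are not filled in here; they are
 * wired up to the netns fqdir fields in ip6_frags_ns_sysctl_register().
 */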
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ip6frag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ip6frag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};

/* secret interval has been deprecated */
static int ip6_frags_secret_interval_unused;
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};

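/* Register the per-netns table under "net/ipv6", duplicating the template
 * for non-init namespaces so each gets its own .data pointers.
 */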
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

	}
	table[0].data	= &net->ipv6.fqdir->high_thresh;
	table[0].extra1	= &net->ipv6.fqdir->low_thresh;
	table[1].data	= &net->ipv6.fqdir->low_thresh;
	table[1].extra2	= &net->ipv6.fqdir->high_thresh;
	table[2].data	= &net->ipv6.fqdir->timeout;

	hdr = register_net_sysctl_sz(net, "net/ipv6", table,
				     ARRAY_SIZE(ip6_frags_ns_ctl_table));
	if (!hdr)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	const struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
			ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static int ip6_frags_sysctl_register(void)
{
	return 0;
}

static void ip6_frags_sysctl_unregister(void)
{
}
#endif

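/* Per-netns init: allocate the fqdir, apply the default thresholds and
 * timeout, and register the per-netns sysctls.
 */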
static int __net_init ipv6_frags_init_net(struct net *net)
{
	int res;

	res = fqdir_init(&net->ipv6.fqdir, &ip6_frags, net);
	if (res < 0)
		return res;

	net->ipv6.fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = ip6_frags_ns_sysctl_register(net);
	if (res < 0)
		fqdir_exit(net->ipv6.fqdir);
	return res;
}

static void __net_exit ipv6_frags_pre_exit_net(struct net *net)
{
	fqdir_pre_exit(net->ipv6.fqdir);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	fqdir_exit(net->ipv6.fqdir);
}

static struct pernet_operations ip6_frags_ops = {
	.init		= ipv6_frags_init_net,
	.pre_exit	= ipv6_frags_pre_exit_net,
	.exit		= ipv6_frags_exit_net,
};

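/* rhashtable parameters for fragment queue lookup; the hash and compare
 * helpers key on struct frag_v6_compare_key (see <net/ipv6_frag.h>).
 */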
static const struct rhashtable_params ip6_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

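/* Module init: set up the inet_frags backend, hook IPPROTO_FRAGMENT into the
 * IPv6 input path, and register the sysctls and per-netns operations.
 */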
int __init ipv6_frag_init(void)
{
	int ret;

	ip6_frags.constructor = ip6frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.frags_cache_name = ip6_frag_cache_name;
	ip6_frags.rhash_params = ip6_rhash_params;
	ret = inet_frags_init(&ip6_frags);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto err_protocol;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
err_protocol:
	inet_frags_fini(&ip6_frags);
	goto out;
}

void ipv6_frag_exit(void)
{
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	inet_frags_fini(&ip6_frags);
}