// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
			      enum sk_rst_reason reason);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allows compiler optimizations.
 * It is a specialized version of inet6_sk_generic().
 */
#define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \
					      struct tcp6_sock, tcp)->inet6)
94 
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		rcu_assign_pointer(sk->sk_rx_dst, dst);
		sk->sk_rx_dst_ifindex = skb->skb_iif;
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
	}
}

static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct inet_timewait_death_row *tcp_death_row;
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6_txoptions *opt;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (inet6_test_bit(SNDFLOW, sk)) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

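	/* Reconnecting to a different peer: the cached timestamp state and
	 * write sequence from the old destination must not be reused.
	 */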
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
		WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
			WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	tp->tcp_usec_ts = dst_tcp_usec_ts(dst);
	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (!saddr) {
		saddr = &fl6.saddr;

		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
		if (err)
			goto failure;
	}

	/* set the source address */
	np->saddr = *saddr;
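	/* The IPv4 receive address is unused on an AF_INET6 socket; fill it
	 * with the LOOPBACK4_IPV6 marker (127.0.0.6).
	 */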
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

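	/* Conservative MSS clamp: a full segment must fit into the IPv6
	 * minimum MTU (1280 bytes).
	 */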
	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	inet_bhash2_reset_saddr(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		/* To increase the counter of ignored icmps for TCP-AO */
		tcp_ao_ignore_icmp(sk, AF_INET6, type, code);
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) {
		sock_put(sk);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (static_branch_unlikely(&ip6_min_hopcount)) {
		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
			goto out;
		}
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		u32 mtu = ntohl(info);

		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (mtu < IPV6_MIN_MTU)
			goto out;

		WRITE_ONCE(tp->mtu_info, mtu);

		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

		if (!sock_owned_by_user(sk))
			tcp_done_with_error(sk, err);
		else
			WRITE_ONCE(sk->sk_err_soft, err);
		goto out;
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
	}

	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
		WRITE_ONCE(sk->sk_err, err);
		sk_error_report(sk);
	} else {
		WRITE_ONCE(sk->sk_err_soft, err);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}


static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;
	u8 tclass;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(np->tclass & INET_ECN_MASK) :
				np->tclass;

		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
			       opt, tclass, READ_ONCE(sk->sk_priority));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	consume_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return tcp_md5_do_lookup(sk, l3index,
				 (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	int l3index;

	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
				    l3index);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	union tcp_ao_addr *addr;
	int l3index = 0;
	u8 prefixlen;
	bool l3flag;
	u8 flags;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
	l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index, flags);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index, flags);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3];

		/* Don't allow keys for peers that have a matching TCP-AO key.
		 * See the comment in tcp_ao_add_cmd()
		 */
		if (tcp_ao_required(sk, addr, AF_INET,
				    l3flag ? l3index : -1, false))
			return -EKEYREJECTED;
		return tcp_md5_do_add(sk, addr,
				      AF_INET, prefixlen, l3index, flags,
				      cmd.tcpm_key, cmd.tcpm_keylen);
	}

	addr = (union tcp_md5_addr *)&sin6->sin6_addr;

	/* Don't allow keys for peers that have a matching TCP-AO key.
	 * See the comment in tcp_ao_add_cmd()
	 */
	if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false))
		return -EKEYREJECTED;

	return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags,
			      cmd.tcpm_key, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_sigpool hp;

	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
		goto clear_hash_nostart;

	if (crypto_ahash_init(hp.req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(&hp, key))
		goto clear_hash;
	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
	if (crypto_ahash_final(hp.req))
		goto clear_hash;

	tcp_sigpool_end(&hp);
	return 0;

clear_hash:
	tcp_sigpool_end(&hp);
clear_hash_nostart:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct in6_addr *saddr, *daddr;
	struct tcp_sigpool hp;

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
		goto clear_hash_nostart;

	if (crypto_ahash_init(hp.req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(&hp, key))
		goto clear_hash;
	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
	if (crypto_ahash_final(hp.req))
		goto clear_hash;

	tcp_sigpool_end(&hp);
	return 0;

clear_hash:
	tcp_sigpool_end(&hp);
clear_hash_nostart:
	memset(md5_hash, 0, 16);
	return 1;
}
#endif

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb,
			    u32 tw_isn)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi *fl,
					  struct request_sock *req,
					  u32 tw_isn)
{
	tcp_v6_init_req(req, sk, skb, tw_isn);

	if (security_inet_conn_request(sk, skb, req))
		return NULL;

	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
#ifdef CONFIG_TCP_AO
	.ao_lookup	=	tcp_v6_ao_lookup_rsk,
	.ao_calc_key	=	tcp_v6_ao_calc_key_rsk,
	.ao_synack_hash =	tcp_v6_ao_synack_hash,
#endif
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, int rst, u8 tclass, __be32 label,
				 u32 priority, u32 txhash, struct tcp_key *key)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 mrst = 0, *topt;
	struct dst_entry *dst;
	__u32 mark = 0;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
	if (tcp_key_is_md5(key))
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
	if (tcp_key_is_ao(key))
		tot_len += tcp_ao_len_aligned(key->ao_key);

#ifdef CONFIG_MPTCP
	if (rst && !tcp_key_is_md5(key)) {
		mrst = mptcp_reset_option(skb);

		if (mrst)
			tot_len += sizeof(__be32);
	}
#endif

	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_TCP_HEADER);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

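	/* RFC 7323 timestamp option: two NOPs for alignment, then
	 * kind/length, TSval and TSecr.
	 */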
	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

	if (mrst)
		*topt++ = mrst;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_key_is_md5(key)) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif
#ifdef CONFIG_TCP_AO
	if (tcp_key_is_ao(key)) {
		*topt++ = htonl((TCPOPT_AO << 24) |
				(tcp_ao_len(key->ao_key) << 16) |
				(key->ao_key->sndid << 8) |
				(key->rcv_next));

		tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key,
				key->traffic_key,
				(union tcp_ao_addr *)&ipv6_hdr(skb)->saddr,
				(union tcp_ao_addr *)&ipv6_hdr(skb)->daddr,
				t1, key->sne);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			mark = inet_twsk(sk)->tw_mark;
		else
			mark = READ_ONCE(sk->sk_mark);
		skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
	}
	if (txhash) {
		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
	}
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow either way, even for a RST:
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	if (sk && sk->sk_state != TCP_TIME_WAIT)
		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
	else
		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
			      enum sk_rst_reason reason)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	const __u8 *md5_hash_location = NULL;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
	bool allocated_traffic_key = false;
#endif
	const struct tcp_ao_hdr *aoh;
	struct tcp_key key = {};
	u32 seq = 0, ack_seq = 0;
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	u32 txhash = 0;
	int oif = 0;
#ifdef CONFIG_TCP_MD5SIG
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	/* Invalid TCP option size or twice included auth */
	if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
		return;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
	rcu_read_lock();
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
		if (key.md5_key)
			key.type = TCP_KEY_MD5;
	} else if (md5_hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * The active side is gone. Try to find the listening socket
		 * through the source port, and then the MD5 key through that
		 * listening socket. We do not loosen security here:
		 * the incoming packet is checked against the found key, and
		 * no RST is generated if the MD5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
					    NULL, 0, &ipv6h->saddr, th->source,
					    &ipv6h->daddr, ntohs(th->source),
					    dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key.md5_key)
			goto out;
		key.type = TCP_KEY_MD5;

		genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
		if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

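	/* RFC 793 reset generation: if the offending segment carried an ACK,
	 * use that value as our sequence number; otherwise acknowledge
	 * everything the segment occupied in sequence space.
	 */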
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

#ifdef CONFIG_TCP_AO
	if (aoh) {
		int l3index;

		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq,
					 &key.ao_key, &key.traffic_key,
					 &allocated_traffic_key,
					 &key.rcv_next, &key.sne))
			goto out;
		key.type = TCP_KEY_AO;
	}
#endif

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			if (inet6_test_bit(REPFLOW, sk))
				label = ip6_flowlabel(ipv6h);
			priority = READ_ONCE(sk->sk_priority);
			txhash = sk->sk_txhash;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
			txhash = inet_twsk(sk)->tw_txhash;
		}
	} else {
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	trace_tcp_send_reset(sk, skb, reason);

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
			     ipv6_get_dsfield(ipv6h), label, priority, txhash,
			     &key);

#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
out:
	if (allocated_traffic_key)
		kfree(key.traffic_key);
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_key *key, u8 tclass,
			    __be32 label, u32 priority, u32 txhash)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
			     tclass, label, priority, txhash, key);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
	struct tcp_key key = {};
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;

	if (static_branch_unlikely(&tcp_ao_needed.key)) {

		/* FIXME: the segment to-be-acked is not verified yet */
		ao_info = rcu_dereference(tcptw->ao_info);
		if (ao_info) {
			const struct tcp_ao_hdr *aoh;

			/* Invalid TCP option size or twice included auth */
			if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
				goto out;
			if (aoh)
				key.ao_key = tcp_ao_established_key(ao_info,
						aoh->rnext_keyid, -1);
		}
	}
	if (key.ao_key) {
		struct tcp_ao_key *rnext_key;

		key.traffic_key = snd_other_key(key.ao_key);
		/* rcv_next switches to our rcv_next */
		rnext_key = READ_ONCE(ao_info->rnext_key);
		key.rcv_next = rnext_key->rcvid;
		key.sne = READ_ONCE(ao_info->snd_sne);
		key.type = TCP_KEY_AO;
#else
	if (0) {
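		/* Dead branch: keeps the following else-if chain syntactically
		 * balanced when CONFIG_TCP_AO is disabled.
		 */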
#endif
#ifdef CONFIG_TCP_MD5SIG
	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
		key.md5_key = tcp_twsk_md5_key(tcptw);
		if (key.md5_key)
			key.type = TCP_KEY_MD5;
#endif
	}

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt,
			READ_ONCE(tcptw->tw_rcv_nxt),
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_tw_tsval(tcptw),
			READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
			&key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel),
			tw->tw_priority, tw->tw_txhash);

#ifdef CONFIG_TCP_AO
out:
#endif
	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	struct tcp_key key = {};

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key) &&
	    tcp_rsk_used_ao(req)) {
		const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
		const struct tcp_ao_hdr *aoh;
		int l3index;

		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		/* Invalid TCP option size or twice included auth */
		if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
			return;
		if (!aoh)
			return;
		key.ao_key = tcp_ao_do_lookup(sk, l3index,
					      (union tcp_ao_addr *)addr,
					      AF_INET6, aoh->rnext_keyid, -1);
		if (unlikely(!key.ao_key)) {
			/* Send ACK with any matching MKT for the peer */
			key.ao_key = tcp_ao_do_lookup(sk, l3index,
						      (union tcp_ao_addr *)addr,
						      AF_INET6, -1, -1);
			/* Matching key disappeared (user removed the key?);
			 * let the handshake time out.
			 */
			if (!key.ao_key) {
				net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n",
						     addr,
						     ntohs(tcp_hdr(skb)->source),
						     &ipv6_hdr(skb)->daddr,
						     ntohs(tcp_hdr(skb)->dest));
				return;
			}
		}
		key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
		if (!key.traffic_key)
			return;

		key.type = TCP_KEY_AO;
		key.rcv_next = aoh->keyid;
		tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
#else
	if (0) {
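		/* Dead branch: keeps the following else-if chain syntactically
		 * balanced when CONFIG_TCP_AO is disabled.
		 */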
#endif
#ifdef CONFIG_TCP_MD5SIG
	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
		int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
						   l3index);
		if (key.md5_key)
			key.type = TCP_KEY_MD5;
#endif
	}

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
			tcp_rsk_tsval(tcp_rsk(req)),
			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
			&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
			READ_ONCE(sk->sk_priority),
			READ_ONCE(tcp_rsk(req)->txhash));
	if (tcp_key_is_ao(&key))
		kfree(key.traffic_key);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

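	/* A non-SYN segment reaching a listener here may be a syncookie ACK;
	 * cookie_v6_check() validates it and may create the child socket.
	 */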
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	bool found_dup_sk = false;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

		newnp = tcp_inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet_iif(skb);
		newnp->mcast_hops  = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (inet6_test_bit(REPFLOW, sk))
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	inet6_sk_rx_dst_set(newsk, skb);

	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = tcp_inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ip6_dst_store(newsk, dst, NULL, NULL);

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (inet6_test_bit(REPFLOW, sk))
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Set ToS of the new socket based upon the value of incoming SYN.
	 * ECT bits are set later in tcp_init_transfer().
	 */
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

	if (!tcp_rsk_used_ao(req)) {
		/* Copy over the MD5 key from the original socket */
		key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
		if (key) {
			const union tcp_md5_addr *addr;

			addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
			if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
				inet_csk_prepare_forced_close(newsk);
				tcp_done(newsk);
				goto out;
			}
		}
	}
#endif
#ifdef CONFIG_TCP_AO
	/* Copy over tcp_ao_info if any */
	if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
		goto out; /* OOM */
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				tcp_v6_restore_cb(newnp->pktoptions);
		}
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							   u32));
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
INDIRECT_CALLABLE_SCOPE
int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
	enum skb_drop_reason reason;
	struct tcp_sock *tp;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and gets backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
1599 
1600 	if (skb->protocol == htons(ETH_P_IP))
1601 		return tcp_v4_do_rcv(sk, skb);
1602 
1603 	/*
1604 	 *	socket locking is here for SMP purposes as backlog rcv
1605 	 *	is currently called with bh processing disabled.
1606 	 */
1607 
1608 	/* Do Stevens' IPV6_PKTOPTIONS.
1609 
1610 	   Yes, guys, it is the only place in our code, where we
1611 	   may make it not affecting IPv4.
1612 	   The rest of code is protocol independent,
1613 	   and I do not like idea to uglify IPv4.
1614 
1615 	   Actually, all the idea behind IPV6_PKTOPTIONS
1616 	   looks not very well thought. For now we latch
1617 	   options, received in the last packet, enqueued
1618 	   by tcp. Feel free to propose better solution.
1619 					       --ANK (980728)
1620 	 */
1621 	if (np->rxopt.all && sk->sk_state != TCP_LISTEN)
1622 		opt_skb = skb_clone_and_charge_r(skb, sk);
1623 
1624 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1625 		struct dst_entry *dst;
1626 
1627 		dst = rcu_dereference_protected(sk->sk_rx_dst,
1628 						lockdep_sock_is_held(sk));
1629 
1630 		sock_rps_save_rxhash(sk, skb);
1631 		sk_mark_napi_id(sk, skb);
1632 		if (dst) {
1633 			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1634 			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1635 					    dst, sk->sk_rx_dst_cookie) == NULL) {
1636 				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1637 				dst_release(dst);
1638 			}
1639 		}
1640 
1641 		tcp_rcv_established(sk, skb);
1642 		if (opt_skb)
1643 			goto ipv6_pktoptions;
1644 		return 0;
1645 	}
1646 
1647 	if (tcp_checksum_complete(skb))
1648 		goto csum_err;
1649 
1650 	if (sk->sk_state == TCP_LISTEN) {
1651 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1652 
1653 		if (nsk != sk) {
1654 			if (nsk) {
1655 				reason = tcp_child_process(sk, nsk, skb);
1656 				if (reason)
1657 					goto reset;
1658 			}
1659 			return 0;
1660 		}
1661 	} else
1662 		sock_rps_save_rxhash(sk, skb);
1663 
1664 	reason = tcp_rcv_state_process(sk, skb);
1665 	if (reason)
1666 		goto reset;
1667 	if (opt_skb)
1668 		goto ipv6_pktoptions;
1669 	return 0;
1670 
1671 reset:
1672 	tcp_v6_send_reset(sk, skb, sk_rst_convert_drop_reason(reason));
1673 discard:
1674 	if (opt_skb)
1675 		__kfree_skb(opt_skb);
1676 	sk_skb_reason_drop(sk, skb, reason);
1677 	return 0;
1678 csum_err:
1679 	reason = SKB_DROP_REASON_TCP_CSUM;
1680 	trace_tcp_bad_csum(skb);
1681 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1682 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1683 	goto discard;
1684 
1685 
1686 ipv6_pktoptions:
1687 	/* Do you ask, what is it?
1688 
1689 	   1. skb was enqueued by tcp.
1690 	   2. skb is added to tail of read queue, rather than out of order.
1691 	   3. socket is not in passive state.
1692 	   4. Finally, it really contains options, which user wants to receive.
1693 	 */
1694 	tp = tcp_sk(sk);
1695 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1696 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1697 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1698 			WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb));
1699 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1700 			WRITE_ONCE(np->mcast_hops,
1701 				   ipv6_hdr(opt_skb)->hop_limit);
1702 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1703 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1704 		if (inet6_test_bit(REPFLOW, sk))
1705 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1706 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1707 			tcp_v6_restore_cb(opt_skb);
1708 			opt_skb = xchg(&np->pktoptions, opt_skb);
1709 		} else {
1710 			__kfree_skb(opt_skb);
1711 			opt_skb = xchg(&np->pktoptions, NULL);
1712 		}
1713 	}
1714 
1715 	consume_skb(opt_skb);
1716 	return 0;
1717 }
1718 
1719 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1720 			   const struct tcphdr *th)
1721 {
1722 	/* This is tricky: we move IP6CB at its correct location into
1723 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1724 	 * _decode_session6() uses IP6CB().
1725 	 * barrier() makes sure compiler won't play aliasing games.
1726 	 */
1727 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1728 		sizeof(struct inet6_skb_parm));
1729 	barrier();
1730 
1731 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1732 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1733 				    skb->len - th->doff*4);
1734 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1735 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1736 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1737 	TCP_SKB_CB(skb)->sacked = 0;
1738 	TCP_SKB_CB(skb)->has_rxtstamp =
1739 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1740 }
1741 
1742 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1743 {
1744 	enum skb_drop_reason drop_reason;
1745 	int sdif = inet6_sdif(skb);
1746 	int dif = inet6_iif(skb);
1747 	const struct tcphdr *th;
1748 	const struct ipv6hdr *hdr;
1749 	struct sock *sk = NULL;
1750 	bool refcounted;
1751 	int ret;
1752 	u32 isn;
1753 	struct net *net = dev_net(skb->dev);
1754 
1755 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1756 	if (skb->pkt_type != PACKET_HOST)
1757 		goto discard_it;
1758 
1759 	/*
1760 	 *	Count it even if it's bad.
1761 	 */
1762 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1763 
1764 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1765 		goto discard_it;
1766 
1767 	th = (const struct tcphdr *)skb->data;
1768 
1769 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
1770 		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1771 		goto bad_packet;
1772 	}
1773 	if (!pskb_may_pull(skb, th->doff*4))
1774 		goto discard_it;
1775 
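	/* Checksum handling: segments arriving with a full hardware checksum
	 * are validated against the IPv6 pseudo-header here; for the rest,
	 * skb->csum is primed so tcp_checksum_complete() can finish the
	 * verification later.
	 */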
1776 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1777 		goto csum_error;
1778 
1779 	th = (const struct tcphdr *)skb->data;
1780 	hdr = ipv6_hdr(skb);
1781 
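	/* Look up the socket owning this segment: established, request and
	 * timewait sockets first, then listeners.  We may come back here if
	 * another CPU stole the request sock while we were processing it.
	 */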
1782 lookup:
1783 	sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
1784 				th->source, th->dest, inet6_iif(skb), sdif,
1785 				&refcounted);
1786 	if (!sk)
1787 		goto no_tcp_socket;
1788 
1789 	if (sk->sk_state == TCP_TIME_WAIT)
1790 		goto do_time_wait;
1791 
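	/* A TCP_NEW_SYN_RECV "socket" is a request_sock created by a listener
	 * for an in-flight handshake.  Validate the segment against the
	 * listener (XFRM policy, TCP-MD5/AO, checksum, socket filter), then
	 * let tcp_check_req() decide whether it completes the handshake and
	 * creates the child socket.
	 */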
1792 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1793 		struct request_sock *req = inet_reqsk(sk);
1794 		bool req_stolen = false;
1795 		struct sock *nsk;
1796 
1797 		sk = req->rsk_listener;
1798 		if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1799 			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1800 		else
1801 			drop_reason = tcp_inbound_hash(sk, req, skb,
1802 						       &hdr->saddr, &hdr->daddr,
1803 						       AF_INET6, dif, sdif);
1804 		if (drop_reason) {
1805 			sk_drops_add(sk, skb);
1806 			reqsk_put(req);
1807 			goto discard_it;
1808 		}
1809 		if (tcp_checksum_complete(skb)) {
1810 			reqsk_put(req);
1811 			goto csum_error;
1812 		}
1813 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1814 			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1815 			if (!nsk) {
1816 				inet_csk_reqsk_queue_drop_and_put(sk, req);
1817 				goto lookup;
1818 			}
1819 			sk = nsk;
1820 			/* reuseport_migrate_sock() already took one sk_refcnt
1821 			 * reference on the socket it returned.
1822 			 */
1823 		} else {
1824 			sock_hold(sk);
1825 		}
1826 		refcounted = true;
1827 		nsk = NULL;
1828 		if (!tcp_filter(sk, skb)) {
1829 			th = (const struct tcphdr *)skb->data;
1830 			hdr = ipv6_hdr(skb);
1831 			tcp_v6_fill_cb(skb, hdr, th);
1832 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1833 		} else {
1834 			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1835 		}
1836 		if (!nsk) {
1837 			reqsk_put(req);
1838 			if (req_stolen) {
1839 				/* Another CPU got exclusive access to req
1840 				 * and created a full-blown socket.
1841 				 * Try to feed this packet to that socket
1842 				 * instead of discarding it.
1843 				 */
1844 				tcp_v6_restore_cb(skb);
1845 				sock_put(sk);
1846 				goto lookup;
1847 			}
1848 			goto discard_and_relse;
1849 		}
1850 		nf_reset_ct(skb);
1851 		if (nsk == sk) {
1852 			reqsk_put(req);
1853 			tcp_v6_restore_cb(skb);
1854 		} else {
1855 			drop_reason = tcp_child_process(sk, nsk, skb);
1856 			if (drop_reason) {
1857 				enum sk_rst_reason rst_reason;
1858 
1859 				rst_reason = sk_rst_convert_drop_reason(drop_reason);
1860 				tcp_v6_send_reset(nsk, skb, rst_reason);
1861 				goto discard_and_relse;
1862 			}
1863 			sock_put(sk);
1864 			return 0;
1865 		}
1866 	}
1867 
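	/* Full-socket path: run the minimum hop limit, XFRM policy, TCP-MD5/AO
	 * and socket filter checks, then either process the segment directly
	 * or, if the socket is currently owned by a user context, queue it on
	 * the backlog to be handled from release_sock().
	 */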
1868 process:
1869 	if (static_branch_unlikely(&ip6_min_hopcount)) {
1870 		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1871 		if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
1872 			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1873 			drop_reason = SKB_DROP_REASON_TCP_MINTTL;
1874 			goto discard_and_relse;
1875 		}
1876 	}
1877 
1878 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
1879 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1880 		goto discard_and_relse;
1881 	}
1882 
1883 	drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr,
1884 				       AF_INET6, dif, sdif);
1885 	if (drop_reason)
1886 		goto discard_and_relse;
1887 
1888 	nf_reset_ct(skb);
1889 
1890 	if (tcp_filter(sk, skb)) {
1891 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1892 		goto discard_and_relse;
1893 	}
1894 	th = (const struct tcphdr *)skb->data;
1895 	hdr = ipv6_hdr(skb);
1896 	tcp_v6_fill_cb(skb, hdr, th);
1897 
1898 	skb->dev = NULL;
1899 
1900 	if (sk->sk_state == TCP_LISTEN) {
1901 		ret = tcp_v6_do_rcv(sk, skb);
1902 		goto put_and_return;
1903 	}
1904 
1905 	sk_incoming_cpu_update(sk);
1906 
1907 	bh_lock_sock_nested(sk);
1908 	tcp_segs_in(tcp_sk(sk), skb);
1909 	ret = 0;
1910 	if (!sock_owned_by_user(sk)) {
1911 		ret = tcp_v6_do_rcv(sk, skb);
1912 	} else {
1913 		if (tcp_add_backlog(sk, skb, &drop_reason))
1914 			goto discard_and_relse;
1915 	}
1916 	bh_unlock_sock(sk);
1917 put_and_return:
1918 	if (refcounted)
1919 		sock_put(sk);
1920 	return ret ? -1 : 0;
1921 
1922 no_tcp_socket:
1923 	drop_reason = SKB_DROP_REASON_NO_SOCKET;
1924 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1925 		goto discard_it;
1926 
1927 	tcp_v6_fill_cb(skb, hdr, th);
1928 
1929 	if (tcp_checksum_complete(skb)) {
1930 csum_error:
1931 		drop_reason = SKB_DROP_REASON_TCP_CSUM;
1932 		trace_tcp_bad_csum(skb);
1933 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1934 bad_packet:
1935 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1936 	} else {
1937 		tcp_v6_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason));
1938 	}
1939 
1940 discard_it:
1941 	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
1942 	sk_skb_reason_drop(sk, skb, drop_reason);
1943 	return 0;
1944 
1945 discard_and_relse:
1946 	sk_drops_add(sk, skb);
1947 	if (refcounted)
1948 		sock_put(sk);
1949 	goto discard_it;
1950 
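	/* Segments matching a TIME_WAIT socket: a valid SYN may be redirected
	 * to a current listener (TCP_TW_SYN); otherwise we ACK, send a reset
	 * or silently drop, as decided by tcp_timewait_state_process().
	 */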
1951 do_time_wait:
1952 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1953 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1954 		inet_twsk_put(inet_twsk(sk));
1955 		goto discard_it;
1956 	}
1957 
1958 	tcp_v6_fill_cb(skb, hdr, th);
1959 
1960 	if (tcp_checksum_complete(skb)) {
1961 		inet_twsk_put(inet_twsk(sk));
1962 		goto csum_error;
1963 	}
1964 
1965 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) {
1966 	case TCP_TW_SYN:
1967 	{
1968 		struct sock *sk2;
1969 
1970 		sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1971 					    skb, __tcp_hdrlen(th),
1972 					    &ipv6_hdr(skb)->saddr, th->source,
1973 					    &ipv6_hdr(skb)->daddr,
1974 					    ntohs(th->dest),
1975 					    tcp_v6_iif_l3_slave(skb),
1976 					    sdif);
1977 		if (sk2) {
1978 			struct inet_timewait_sock *tw = inet_twsk(sk);
1979 			inet_twsk_deschedule_put(tw);
1980 			sk = sk2;
1981 			tcp_v6_restore_cb(skb);
1982 			refcounted = false;
1983 			__this_cpu_write(tcp_tw_isn, isn);
1984 			goto process;
1985 		}
1986 	}
1987 		/* to ACK */
1988 		fallthrough;
1989 	case TCP_TW_ACK:
1990 		tcp_v6_timewait_ack(sk, skb);
1991 		break;
1992 	case TCP_TW_RST:
1993 		tcp_v6_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET);
1994 		inet_twsk_deschedule_put(inet_twsk(sk));
1995 		goto discard_it;
1996 	case TCP_TW_SUCCESS:
1997 		;
1998 	}
1999 	goto discard_it;
2000 }
2001 
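/* Early demux: before routing, try to find an established socket for the
 * segment so its cached rx dst can be attached to the skb and a full
 * route lookup avoided on the fast path.
 */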
2002 void tcp_v6_early_demux(struct sk_buff *skb)
2003 {
2004 	struct net *net = dev_net(skb->dev);
2005 	const struct ipv6hdr *hdr;
2006 	const struct tcphdr *th;
2007 	struct sock *sk;
2008 
2009 	if (skb->pkt_type != PACKET_HOST)
2010 		return;
2011 
2012 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
2013 		return;
2014 
2015 	hdr = ipv6_hdr(skb);
2016 	th = tcp_hdr(skb);
2017 
2018 	if (th->doff < sizeof(struct tcphdr) / 4)
2019 		return;
2020 
2021 	/* Note: we use inet6_iif() here, not tcp_v6_iif() */
2022 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
2023 					&hdr->saddr, th->source,
2024 					&hdr->daddr, ntohs(th->dest),
2025 					inet6_iif(skb), inet6_sdif(skb));
2026 	if (sk) {
2027 		skb->sk = sk;
2028 		skb->destructor = sock_edemux;
2029 		if (sk_fullsock(sk)) {
2030 			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
2031 
2032 			if (dst)
2033 				dst = dst_check(dst, sk->sk_rx_dst_cookie);
2034 			if (dst &&
2035 			    sk->sk_rx_dst_ifindex == skb->skb_iif)
2036 				skb_dst_set_noref(skb, dst);
2037 		}
2038 	}
2039 }
2040 
2041 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
2042 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
2043 	.twsk_destructor = tcp_twsk_destructor,
2044 };
2045 
2046 INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
2047 {
2048 	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
2049 }
2050 
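/* Address-family specific operations used by native IPv6 TCP sockets.
 * Sockets talking to IPv4-mapped addresses are switched to ipv6_mapped
 * below instead.
 */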
2051 const struct inet_connection_sock_af_ops ipv6_specific = {
2052 	.queue_xmit	   = inet6_csk_xmit,
2053 	.send_check	   = tcp_v6_send_check,
2054 	.rebuild_header	   = inet6_sk_rebuild_header,
2055 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
2056 	.conn_request	   = tcp_v6_conn_request,
2057 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
2058 	.net_header_len	   = sizeof(struct ipv6hdr),
2059 	.setsockopt	   = ipv6_setsockopt,
2060 	.getsockopt	   = ipv6_getsockopt,
2061 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
2062 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
2063 	.mtu_reduced	   = tcp_v6_mtu_reduced,
2064 };
2065 
2066 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2067 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
2068 #ifdef CONFIG_TCP_MD5SIG
2069 	.md5_lookup	=	tcp_v6_md5_lookup,
2070 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
2071 	.md5_parse	=	tcp_v6_parse_md5_keys,
2072 #endif
2073 #ifdef CONFIG_TCP_AO
2074 	.ao_lookup	=	tcp_v6_ao_lookup,
2075 	.calc_ao_hash	=	tcp_v6_ao_hash_skb,
2076 	.ao_parse	=	tcp_v6_parse_ao,
2077 	.ao_calc_key_sk	=	tcp_v6_ao_calc_key_sk,
2078 #endif
2079 };
2080 #endif
2081 
2082 /*
2083  *	TCP over IPv4 via INET6 API
2084  */
2085 static const struct inet_connection_sock_af_ops ipv6_mapped = {
2086 	.queue_xmit	   = ip_queue_xmit,
2087 	.send_check	   = tcp_v4_send_check,
2088 	.rebuild_header	   = inet_sk_rebuild_header,
2089 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2090 	.conn_request	   = tcp_v6_conn_request,
2091 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
2092 	.net_header_len	   = sizeof(struct iphdr),
2093 	.setsockopt	   = ipv6_setsockopt,
2094 	.getsockopt	   = ipv6_getsockopt,
2095 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
2096 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
2097 	.mtu_reduced	   = tcp_v4_mtu_reduced,
2098 };
2099 
2100 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2101 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
2102 #ifdef CONFIG_TCP_MD5SIG
2103 	.md5_lookup	=	tcp_v4_md5_lookup,
2104 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
2105 	.md5_parse	=	tcp_v6_parse_md5_keys,
2106 #endif
2107 #ifdef CONFIG_TCP_AO
2108 	.ao_lookup	=	tcp_v6_ao_lookup,
2109 	.calc_ao_hash	=	tcp_v4_ao_hash_skb,
2110 	.ao_parse	=	tcp_v6_parse_ao,
2111 	.ao_calc_key_sk	=	tcp_v4_ao_calc_key_sk,
2112 #endif
2113 };
2114 #endif
2115 
2116 /* NOTE: Many fields are already zeroed by sk_alloc(), so they
2117  *       need not be initialized again here.
2118  */
2119 static int tcp_v6_init_sock(struct sock *sk)
2120 {
2121 	struct inet_connection_sock *icsk = inet_csk(sk);
2122 
2123 	tcp_init_sock(sk);
2124 
2125 	icsk->icsk_af_ops = &ipv6_specific;
2126 
2127 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2128 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
2129 #endif
2130 
2131 	return 0;
2132 }
2133 
2134 #ifdef CONFIG_PROC_FS
2135 /* Proc filesystem TCPv6 sock list dumping. */
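/* get_openreq6(), get_tcp6_sock() and get_timewait6_sock() all emit one
 * line per socket in the common /proc/net/tcp6 column layout: slot,
 * local and remote address:port in hex, state, tx_queue:rx_queue, timer
 * info, retransmits, uid, timeout and inode, plus kernel debug fields.
 */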
2136 static void get_openreq6(struct seq_file *seq,
2137 			 const struct request_sock *req, int i)
2138 {
2139 	long ttd = req->rsk_timer.expires - jiffies;
2140 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
2141 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
2142 
2143 	if (ttd < 0)
2144 		ttd = 0;
2145 
2146 	seq_printf(seq,
2147 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2148 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
2149 		   i,
2150 		   src->s6_addr32[0], src->s6_addr32[1],
2151 		   src->s6_addr32[2], src->s6_addr32[3],
2152 		   inet_rsk(req)->ir_num,
2153 		   dest->s6_addr32[0], dest->s6_addr32[1],
2154 		   dest->s6_addr32[2], dest->s6_addr32[3],
2155 		   ntohs(inet_rsk(req)->ir_rmt_port),
2156 		   TCP_SYN_RECV,
2157 		   0, 0, /* could print option size, but that is af dependent. */
2158 		   1,   /* timers active (only the expire timer) */
2159 		   jiffies_to_clock_t(ttd),
2160 		   req->num_timeout,
2161 		   from_kuid_munged(seq_user_ns(seq),
2162 				    sock_i_uid(req->rsk_listener)),
2163 		   0,  /* non standard timer */
2164 		   0, /* open_requests have no inode */
2165 		   0, req);
2166 }
2167 
2168 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2169 {
2170 	const struct in6_addr *dest, *src;
2171 	__u16 destp, srcp;
2172 	int timer_active;
2173 	unsigned long timer_expires;
2174 	const struct inet_sock *inet = inet_sk(sp);
2175 	const struct tcp_sock *tp = tcp_sk(sp);
2176 	const struct inet_connection_sock *icsk = inet_csk(sp);
2177 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2178 	int rx_queue;
2179 	int state;
2180 
2181 	dest  = &sp->sk_v6_daddr;
2182 	src   = &sp->sk_v6_rcv_saddr;
2183 	destp = ntohs(inet->inet_dport);
2184 	srcp  = ntohs(inet->inet_sport);
2185 
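	/* timer_active encodes the "tr" column: 1 for the retransmit/REO/
	 * loss-probe timers, 4 for the zero window probe timer, 2 for
	 * sk_timer (keepalive), 0 for none; get_timewait6_sock() reports 3
	 * for TIME_WAIT sockets.
	 */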
2186 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2187 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2188 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2189 		timer_active	= 1;
2190 		timer_expires	= icsk->icsk_timeout;
2191 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2192 		timer_active	= 4;
2193 		timer_expires	= icsk->icsk_timeout;
2194 	} else if (timer_pending(&sp->sk_timer)) {
2195 		timer_active	= 2;
2196 		timer_expires	= sp->sk_timer.expires;
2197 	} else {
2198 		timer_active	= 0;
2199 		timer_expires = jiffies;
2200 	}
2201 
2202 	state = inet_sk_state_load(sp);
2203 	if (state == TCP_LISTEN)
2204 		rx_queue = READ_ONCE(sp->sk_ack_backlog);
2205 	else
2206 		/* Because we don't lock the socket,
2207 		 * we might find a transient negative value.
2208 		 */
2209 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2210 				      READ_ONCE(tp->copied_seq), 0);
2211 
2212 	seq_printf(seq,
2213 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2214 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2215 		   i,
2216 		   src->s6_addr32[0], src->s6_addr32[1],
2217 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2218 		   dest->s6_addr32[0], dest->s6_addr32[1],
2219 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2220 		   state,
2221 		   READ_ONCE(tp->write_seq) - tp->snd_una,
2222 		   rx_queue,
2223 		   timer_active,
2224 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
2225 		   icsk->icsk_retransmits,
2226 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2227 		   icsk->icsk_probes_out,
2228 		   sock_i_ino(sp),
2229 		   refcount_read(&sp->sk_refcnt), sp,
2230 		   jiffies_to_clock_t(icsk->icsk_rto),
2231 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2232 		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2233 		   tcp_snd_cwnd(tp),
2234 		   state == TCP_LISTEN ?
2235 			fastopenq->max_qlen :
2236 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2237 		   );
2238 }
2239 
2240 static void get_timewait6_sock(struct seq_file *seq,
2241 			       struct inet_timewait_sock *tw, int i)
2242 {
2243 	long delta = tw->tw_timer.expires - jiffies;
2244 	const struct in6_addr *dest, *src;
2245 	__u16 destp, srcp;
2246 
2247 	dest = &tw->tw_v6_daddr;
2248 	src  = &tw->tw_v6_rcv_saddr;
2249 	destp = ntohs(tw->tw_dport);
2250 	srcp  = ntohs(tw->tw_sport);
2251 
2252 	seq_printf(seq,
2253 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2254 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2255 		   i,
2256 		   src->s6_addr32[0], src->s6_addr32[1],
2257 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2258 		   dest->s6_addr32[0], dest->s6_addr32[1],
2259 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2260 		   READ_ONCE(tw->tw_substate), 0, 0,
2261 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2262 		   refcount_read(&tw->tw_refcnt), tw);
2263 }
2264 
2265 static int tcp6_seq_show(struct seq_file *seq, void *v)
2266 {
2267 	struct tcp_iter_state *st;
2268 	struct sock *sk = v;
2269 
2270 	if (v == SEQ_START_TOKEN) {
2271 		seq_puts(seq,
2272 			 "  sl  "
2273 			 "local_address                         "
2274 			 "remote_address                        "
2275 			 "st tx_queue rx_queue tr tm->when retrnsmt"
2276 			 "   uid  timeout inode\n");
2277 		goto out;
2278 	}
2279 	st = seq->private;
2280 
2281 	if (sk->sk_state == TCP_TIME_WAIT)
2282 		get_timewait6_sock(seq, v, st->num);
2283 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2284 		get_openreq6(seq, v, st->num);
2285 	else
2286 		get_tcp6_sock(seq, v, st->num);
2287 out:
2288 	return 0;
2289 }
2290 
2291 static const struct seq_operations tcp6_seq_ops = {
2292 	.show		= tcp6_seq_show,
2293 	.start		= tcp_seq_start,
2294 	.next		= tcp_seq_next,
2295 	.stop		= tcp_seq_stop,
2296 };
2297 
2298 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2299 	.family		= AF_INET6,
2300 };
2301 
2302 int __net_init tcp6_proc_init(struct net *net)
2303 {
2304 	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2305 			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2306 		return -ENOMEM;
2307 	return 0;
2308 }
2309 
2310 void tcp6_proc_exit(struct net *net)
2311 {
2312 	remove_proc_entry("tcp6", net->proc_net);
2313 }
2314 #endif
2315 
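/* proto operations wired into the socket layer for AF_INET6 SOCK_STREAM
 * sockets.  Most handlers are shared with IPv4 TCP; the connect, receive
 * and init paths are IPv6 specific.
 */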
2316 struct proto tcpv6_prot = {
2317 	.name			= "TCPv6",
2318 	.owner			= THIS_MODULE,
2319 	.close			= tcp_close,
2320 	.pre_connect		= tcp_v6_pre_connect,
2321 	.connect		= tcp_v6_connect,
2322 	.disconnect		= tcp_disconnect,
2323 	.accept			= inet_csk_accept,
2324 	.ioctl			= tcp_ioctl,
2325 	.init			= tcp_v6_init_sock,
2326 	.destroy		= tcp_v4_destroy_sock,
2327 	.shutdown		= tcp_shutdown,
2328 	.setsockopt		= tcp_setsockopt,
2329 	.getsockopt		= tcp_getsockopt,
2330 	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
2331 	.keepalive		= tcp_set_keepalive,
2332 	.recvmsg		= tcp_recvmsg,
2333 	.sendmsg		= tcp_sendmsg,
2334 	.splice_eof		= tcp_splice_eof,
2335 	.backlog_rcv		= tcp_v6_do_rcv,
2336 	.release_cb		= tcp_release_cb,
2337 	.hash			= inet6_hash,
2338 	.unhash			= inet_unhash,
2339 	.get_port		= inet_csk_get_port,
2340 	.put_port		= inet_put_port,
2341 #ifdef CONFIG_BPF_SYSCALL
2342 	.psock_update_sk_prot	= tcp_bpf_update_proto,
2343 #endif
2344 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2345 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2346 	.stream_memory_free	= tcp_stream_memory_free,
2347 	.sockets_allocated	= &tcp_sockets_allocated,
2348 
2349 	.memory_allocated	= &tcp_memory_allocated,
2350 	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
2351 
2352 	.memory_pressure	= &tcp_memory_pressure,
2353 	.orphan_count		= &tcp_orphan_count,
2354 	.sysctl_mem		= sysctl_tcp_mem,
2355 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2356 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2357 	.max_header		= MAX_TCP_HEADER,
2358 	.obj_size		= sizeof(struct tcp6_sock),
2359 	.ipv6_pinfo_offset = offsetof(struct tcp6_sock, inet6),
2360 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2361 	.twsk_prot		= &tcp6_timewait_sock_ops,
2362 	.rsk_prot		= &tcp6_request_sock_ops,
2363 	.h.hashinfo		= NULL,
2364 	.no_autobind		= true,
2365 	.diag_destroy		= tcp_abort,
2366 };
2367 EXPORT_SYMBOL_GPL(tcpv6_prot);
2368 
2369 
2370 static struct inet_protosw tcpv6_protosw = {
2371 	.type		=	SOCK_STREAM,
2372 	.protocol	=	IPPROTO_TCP,
2373 	.prot		=	&tcpv6_prot,
2374 	.ops		=	&inet6_stream_ops,
2375 	.flags		=	INET_PROTOSW_PERMANENT |
2376 				INET_PROTOSW_ICSK,
2377 };
2378 
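/* Per-netns init: create the kernel control socket this namespace uses to
 * transmit RSTs and ACKs on behalf of unowned or timewait connections.
 */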
2379 static int __net_init tcpv6_net_init(struct net *net)
2380 {
2381 	int res;
2382 
2383 	res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2384 				   SOCK_RAW, IPPROTO_TCP, net);
2385 	if (!res)
2386 		net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC;
2387 
2388 	return res;
2389 }
2390 
2391 static void __net_exit tcpv6_net_exit(struct net *net)
2392 {
2393 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2394 }
2395 
2396 static struct pernet_operations tcpv6_net_ops = {
2397 	.init	    = tcpv6_net_init,
2398 	.exit	    = tcpv6_net_exit,
2399 };
2400 
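/* Register the IPv6 TCP protocol handler, the SOCK_STREAM protosw entry,
 * the per-netns operations and MPTCP-over-IPv6 support, unwinding in
 * reverse order if any step fails.
 */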
2401 int __init tcpv6_init(void)
2402 {
2403 	int ret;
2404 
2405 	net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
2406 		.handler     = tcp_v6_rcv,
2407 		.err_handler = tcp_v6_err,
2408 		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
2409 	};
2410 	ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2411 	if (ret)
2412 		goto out;
2413 
2414 	/* register inet6 protocol */
2415 	ret = inet6_register_protosw(&tcpv6_protosw);
2416 	if (ret)
2417 		goto out_tcpv6_protocol;
2418 
2419 	ret = register_pernet_subsys(&tcpv6_net_ops);
2420 	if (ret)
2421 		goto out_tcpv6_protosw;
2422 
2423 	ret = mptcpv6_init();
2424 	if (ret)
2425 		goto out_tcpv6_pernet_subsys;
2426 
2427 out:
2428 	return ret;
2429 
2430 out_tcpv6_pernet_subsys:
2431 	unregister_pernet_subsys(&tcpv6_net_ops);
2432 out_tcpv6_protosw:
2433 	inet6_unregister_protosw(&tcpv6_protosw);
2434 out_tcpv6_protocol:
2435 	inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2436 	goto out;
2437 }
2438 
2439 void tcpv6_exit(void)
2440 {
2441 	unregister_pernet_subsys(&tcpv6_net_ops);
2442 	inet6_unregister_protosw(&tcpv6_protosw);
2443 	inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2444 }
2445