// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *      TCPv6 GSO/GRO support
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"

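/* Decide whether this flow should use fraglist GRO: inherit the choice from
 * an already-tracked packet when one matches, otherwise look up an
 * established socket and use fraglist GRO only when none is found (i.e. the
 * packet is most likely being forwarded).
 */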
static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct ipv6hdr *hdr;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet6_get_iif_sdif(skb, &iif, &sdif);
	hdr = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
}

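/* GRO receive callback for TCPv6: validate the checksum against the IPv6
 * pseudo-header (unless the packet will be flushed anyway), pull the TCP
 * header and hand the packet to the protocol-independent tcp_gro_receive().
 */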
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      ip6_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp6_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

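/* GRO complete callback for TCPv6: finish an aggregated packet. Fraglist
 * packets only need their GSO type, segment count and checksum state
 * updated; regular GRO packets have the pseudo-header checksum recomputed
 * before being handed to tcp_gro_complete().
 */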
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
				  &iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

	tcp_gro_complete(skb);
	return 0;
}

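/* Rewrite one TCP port in a segment and fix up the TCP checksum, skipping
 * the work when the port is unchanged.
 */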
static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;

	if (*oldport == newport)
		return;

	th = tcp_hdr(seg);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;
}

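/* After list segmentation, the trailing segments still carry the headers
 * they had when they were aggregated. If the head segment's addresses or
 * ports were modified in the meantime (e.g. by NAT), propagate them to the
 * rest of the list and adjust the checksums accordingly.
 */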
static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct ipv6hdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct ipv6hdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ipv6_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ipv6_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ipv6_hdr(seg);

		iph2->saddr = iph->saddr;
		iph2->daddr = iph->daddr;
		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
	}

	return segs;
}

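/* Split a fraglist GSO skb back into its original list of segments and fix
 * up the per-segment headers and checksums.
 */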
static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
					      netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv6_gso_segment_list_csum(skb);
}

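/* GSO callback for TCPv6. Fraglist skbs are segmented by the list when the
 * head segment's payload length matches gso_size, and fall back to regular
 * segmentation otherwise. The pseudo-header checksum is set up if the stack
 * has not already done so, then the common tcp_gso_segment() does the rest.
 */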
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct tcphdr *th;

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*th)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp6_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the pseudo-header; the stack is usually expected to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	}

	return tcp_gso_segment(skb, features);
}

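/* Register the TCPv6 GSO/GRO callbacks with the IPv6 offload layer. */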
int __init tcpv6_offload_init(void)
{
	net_hotdata.tcpv6_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp6_gso_segment,
			.gro_receive	=	tcp6_gro_receive,
			.gro_complete	=	tcp6_gro_complete,
		},
	};
	return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}