// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp)
{
	int err;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		goto error;

	if (cfg->bind_ifindex) {
		err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
		if (err < 0)
			goto error;
	}

	udp_addr.sin_family = AF_INET;
	udp_addr.sin_addr = cfg->local_ip;
	udp_addr.sin_port = cfg->local_udp_port;
	err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
			  sizeof(udp_addr));
	if (err < 0)
		goto error;

	if (cfg->peer_udp_port) {
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = cfg->peer_udp_port;
		err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
				     sizeof(udp_addr), 0);
		if (err < 0)
			goto error;
	}

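	/* Zero UDP checksums on transmit unless the caller asked for them. */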
	sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

	*sockp = sock;
	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
	}
	*sockp = NULL;
	return err;
}
EXPORT_SYMBOL(udp_sock_create4);

void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *cfg)
{
	struct sock *sk = sock->sk;

	/* Disable multicast loopback */
	inet_clear_bit(MC_LOOP, sk);

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(sk);

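	/*
	 * Publish the caller's private data with RCU semantics; encap
	 * handlers may dereference it from softirq context, and
	 * udp_tunnel_sock_release() clears it before the socket goes away.
	 */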
	rcu_assign_sk_user_data(sk, cfg->sk_user_data);

	udp_sk(sk)->encap_type = cfg->encap_type;
	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
	udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
	udp_sk(sk)->gro_receive = cfg->gro_receive;
	udp_sk(sk)->gro_complete = cfg->gro_complete;

	udp_tunnel_encap_enable(sk);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
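
/*
 * Typical usage (a minimal sketch, not taken from this file): a tunnel
 * driver creates its kernel UDP socket with udp_sock_create4() and then
 * turns it into an encapsulation socket with setup_udp_tunnel_sock().
 * The port number, the my_tunnel_rcv() handler and the my_priv pointer
 * below are illustrative assumptions only.
 *
 *	struct udp_port_cfg port_cfg = {
 *		.family			= AF_INET,
 *		.local_udp_port		= htons(4789),
 *		.use_udp_checksums	= true,
 *	};
 *	struct udp_tunnel_sock_cfg tunnel_cfg = {
 *		.sk_user_data	= my_priv,
 *		.encap_type	= 1,
 *		.encap_rcv	= my_tunnel_rcv,
 *	};
 *	struct socket *sock;
 *	int err;
 *
 *	err = udp_sock_create4(net, &port_cfg, &sock);
 *	if (err)
 *		return err;
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 */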

void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_del_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port);

/* Notify netdevs that UDP port started listening */
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_add_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);

/* Notify netdevs that UDP port is no longer listening */
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_del_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);

void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	udp_set_csum(nocheck, skb, src, dst, skb->len);

	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);

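/*
 * Tear-down order matters here: clear sk_user_data first and wait for a
 * grace period, so encap handlers still running under RCU observe NULL
 * rather than a stale private pointer, then shut down and release the
 * socket.
 */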
void udp_tunnel_sock_release(struct socket *sock)
{
	rcu_assign_sk_user_data(sock->sk, NULL);
	synchronize_rcu();
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);

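/*
 * Build a metadata dst for a received tunnel packet.  Typically called
 * from a driver's encap_rcv handler when the tunnel runs in external
 * (collect-metadata) mode; the outer UDP ports and the presence of an
 * outer checksum are recorded in the tunnel key.
 */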
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	if (family == AF_INET)
		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
	else
		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->key.tp_src = udp_hdr(skb)->source;
	info->key.tp_dst = udp_hdr(skb)->dest;
	if (udp_hdr(skb)->check)
		__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);

struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache)
{
	struct rtable *rt = NULL;
	struct flowi4 fl4;

#ifdef CONFIG_DST_CACHE
	if (dst_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}
#endif

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.flowi4_oif = oif;
	fl4.daddr = key->u.ipv4.dst;
	fl4.saddr = key->u.ipv4.src;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;
	fl4.flowi4_tos = tos & INET_DSCP_MASK;
	fl4.flowi4_flags = key->flow_flags;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
#ifdef CONFIG_DST_CACHE
	if (dst_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
	*saddr = fl4.saddr;
	return rt;
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);
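
/*
 * Transmit-path sketch (illustrative only, error handling trimmed): a
 * driver resolves the route and local source address with
 * udp_tunnel_dst_lookup() and then hands the encapsulated skb to
 * udp_tunnel_xmit_skb().  The sport/dport values, the df flag and the
 * priv pointer are assumptions of the sketch, not part of this file.
 *
 *	struct rtable *rt;
 *	__be32 saddr;
 *
 *	rt = udp_tunnel_dst_lookup(skb, dev, net, 0, &saddr, &info->key,
 *				   sport, dport, info->key.tos,
 *				   &priv->dst_cache);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	udp_tunnel_xmit_skb(rt, sk, skb, saddr, info->key.u.ipv4.dst,
 *			    info->key.tos, info->key.ttl, df, sport, dport,
 *			    false, !test_bit(IP_TUNNEL_CSUM_BIT,
 *					     info->key.tun_flags));
 */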

MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");