1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   * Multipath TCP
4   *
5   * Copyright (c) 2017 - 2019, Intel Corporation.
6   */
7  
8  #ifndef __NET_MPTCP_H
9  #define __NET_MPTCP_H
10  
11  #include <linux/skbuff.h>
12  #include <linux/tcp.h>
13  #include <linux/types.h>
14  
15  struct mptcp_info;
16  struct mptcp_sock;
17  struct seq_file;
18  
/* MPTCP sk_buff extension data
 *
 * Carries the MPTCP DSS state attached to an skb: the data-level
 * sequence/ack numbers plus the flag bits describing which of them are
 * valid and in which (32/64-bit) form.
 */
struct mptcp_ext {
	union {
		u64	data_ack;	/* 64-bit data-level ack (ack64 set) */
		u32	data_ack32;	/* 32-bit data-level ack (ack64 clear) */
	};
	u64		data_seq;	/* data-level sequence number */
	u32		subflow_seq;	/* subflow-relative sequence number */
	u16		data_len;	/* length of the data-level mapping */
	__sum16		csum;		/* DSS checksum, meaningful when csum_reqd */
	u8		use_map:1,	/* a data mapping is present */
			dsn64:1,	/* data_seq is a 64-bit DSN */
			data_fin:1,	/* mapping carries a DATA_FIN */
			use_ack:1,	/* data ack field is valid */
			ack64:1,	/* selects data_ack vs data_ack32 */
			mpc_map:1,	/* NOTE(review): presumably a mapping
					 * created by MP_CAPABLE - confirm
					 */
			frozen:1,	/* ext shared between skbs, do not
					 * modify (set by mptcp_skb_ext_copy())
					 */
			reset_transient:1;	/* MP_TCPRST transient bit */
	u8		reset_reason:4,	/* MP_TCPRST reason code */
			csum_reqd:1,	/* DSS checksum in use */
			infinite_map:1;	/* NOTE(review): infinite mapping
					 * (fallback) assumed from naming
					 */
};
41  
42  #define MPTCPOPT_HMAC_LEN	20
43  #define MPTCP_RM_IDS_MAX	8
44  
/* Set of address ids (e.g. for an RM_ADDR option): only the first 'nr'
 * slots of 'ids' are valid.
 */
struct mptcp_rm_list {
	u8 ids[MPTCP_RM_IDS_MAX];
	u8 nr;	/* number of valid entries in ids[] */
};
49  
/* An MPTCP endpoint address: wire-visible id plus a family-discriminated
 * IPv4/IPv6 address and port.
 */
struct mptcp_addr_info {
	u8			id;	/* address id */
	sa_family_t		family;	/* selects addr vs addr6 below */
	__be16			port;	/* network byte order */
	union {
		struct in_addr	addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr	addr6;
#endif
	};
};
61  
/* Scratch state used by the TCP output path to build the MPTCP options of
 * an outgoing segment. The anonymous union overlays the payloads of the
 * mutually-exclusive suboptions; 'suboptions' records which ones to emit.
 * NOTE(review): per-member suboption attribution below is inferred from
 * field naming - confirm against mptcp_write_options().
 */
struct mptcp_out_options {
#if IS_ENABLED(CONFIG_MPTCP)
	u16 suboptions;			/* bitmask of suboptions to write */
	struct mptcp_rm_list rm_list;	/* RM_ADDR: ids being removed */
	u8 join_id;			/* MP_JOIN: address id */
	u8 backup;			/* MP_JOIN: backup-priority flag */
	u8 reset_reason:4,		/* MP_TCPRST reason code */
	   reset_transient:1,		/* MP_TCPRST transient bit */
	   csum_reqd:1,			/* DSS checksums negotiated */
	   allow_join_id0:1;		/* peer may join using address id 0 */
	union {
		struct {		/* MP_CAPABLE / DSS */
			u64 sndr_key;
			u64 rcvr_key;
			u64 data_seq;
			u32 subflow_seq;
			u16 data_len;
			__sum16 csum;
		};
		struct {		/* ADD_ADDR */
			struct mptcp_addr_info addr;
			u64 ahmac;	/* HMAC authenticating the address */
		};
		struct {		/* DSS copied from skb ext / MP_FAIL */
			struct mptcp_ext ext_copy;
			u64 fail_seq;
		};
		struct {		/* MP_JOIN handshake material */
			u32 nonce;
			u32 token;
			u64 thmac;
			u8 hmac[MPTCPOPT_HMAC_LEN];
		};
	};
#endif
};
98  
99  #define MPTCP_SCHED_NAME_MAX	16
100  #define MPTCP_SCHED_MAX		128
101  #define MPTCP_SCHED_BUF_MAX	(MPTCP_SCHED_NAME_MAX * MPTCP_SCHED_MAX)
102  
103  #define MPTCP_SUBFLOWS_MAX	8
104  
/* Input handed to a packet scheduler's get_subflow() callback: the
 * candidate subflow contexts and whether this is a reinjection pass.
 */
struct mptcp_sched_data {
	bool	reinject;	/* scheduling reinjected (already-sent) data */
	u8	subflows;	/* NOTE(review): presumably the count of valid
				 * contexts[] entries - confirm with callers
				 */
	struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
};
110  
/* Operations implemented by a pluggable MPTCP packet scheduler.
 * Instances are linked on a global list and pinned via the owning module.
 * Cacheline-aligned to avoid false sharing on SMP.
 */
struct mptcp_sched_ops {
	/* select the subflow(s) to transmit on for @msk */
	int (*get_subflow)(struct mptcp_sock *msk,
			   struct mptcp_sched_data *data);

	char			name[MPTCP_SCHED_NAME_MAX];	/* scheduler name */
	struct module		*owner;		/* module providing these ops */
	struct list_head	list;		/* registration list linkage */

	void (*init)(struct mptcp_sock *msk);		/* attach-time hook */
	void (*release)(struct mptcp_sock *msk);	/* detach-time hook */
} ____cacheline_aligned_in_smp;
122  
123  #ifdef CONFIG_MPTCP
124  void mptcp_init(void);
125  
sk_is_mptcp(const struct sock * sk)126  static inline bool sk_is_mptcp(const struct sock *sk)
127  {
128  	return tcp_sk(sk)->is_mptcp;
129  }
130  
rsk_is_mptcp(const struct request_sock * req)131  static inline bool rsk_is_mptcp(const struct request_sock *req)
132  {
133  	return tcp_rsk(req)->is_mptcp;
134  }
135  
rsk_drop_req(const struct request_sock * req)136  static inline bool rsk_drop_req(const struct request_sock *req)
137  {
138  	return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req;
139  }
140  
141  void mptcp_space(const struct sock *ssk, int *space, int *full_space);
142  bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
143  		       unsigned int *size, struct mptcp_out_options *opts);
144  bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
145  			  struct mptcp_out_options *opts);
146  bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
147  			       unsigned int *size, unsigned int remaining,
148  			       struct mptcp_out_options *opts);
149  bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
150  
151  void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
152  			 struct mptcp_out_options *opts);
153  
154  void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
155  
/* move the skb extension ownership, with the assumption that 'to' is
 * newly allocated
 */
static inline void mptcp_skb_ext_move(struct sk_buff *to,
				      struct sk_buff *from)
{
	/* nothing to transfer when @from carries no MPTCP extension */
	if (!skb_ext_exist(from, SKB_EXT_MPTCP))
		return;

	/* @to is assumed freshly allocated and extension-free; if it
	 * unexpectedly holds extensions, drop the reference now so the
	 * overwrite below does not leak it
	 */
	if (WARN_ON_ONCE(to->active_extensions))
		skb_ext_put(to);

	/* steal the whole extension area; @from must not use it afterwards */
	to->active_extensions = from->active_extensions;
	to->extensions = from->extensions;
	from->active_extensions = 0;
}
172  
/* Share @from's MPTCP extension with @to (no-op if @from has none). */
static inline void mptcp_skb_ext_copy(struct sk_buff *to,
				      struct sk_buff *from)
{
	struct mptcp_ext *from_ext;

	from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
	if (!from_ext)
		return;

	/* the ext is about to be referenced by two skbs; mark it frozen
	 * before copying. NOTE(review): 'frozen' presumably forbids later
	 * mutation of the shared mapping - confirm at its users.
	 */
	from_ext->frozen = 1;
	skb_ext_copy(to, from);
}
185  
mptcp_ext_matches(const struct mptcp_ext * to_ext,const struct mptcp_ext * from_ext)186  static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
187  				     const struct mptcp_ext *from_ext)
188  {
189  	/* MPTCP always clears the ext when adding it to the skb, so
190  	 * holes do not bother us here
191  	 */
192  	return !from_ext ||
193  	       (to_ext && from_ext &&
194  	        !memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
195  }
196  
/* check if skbs can be collapsed.
 * MPTCP collapse is allowed if neither @to nor @from carry an mptcp data
 * mapping, or if the extension of @to is the same as @from.
 * Collapsing is not possible if @to lacks an extension, but @from carries one.
 */
mptcp_skb_can_collapse(const struct sk_buff * to,const struct sk_buff * from)202  static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
203  					  const struct sk_buff *from)
204  {
205  	return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
206  				 skb_ext_find(from, SKB_EXT_MPTCP));
207  }
208  
209  void mptcp_seq_show(struct seq_file *seq);
210  int mptcp_subflow_init_cookie_req(struct request_sock *req,
211  				  const struct sock *sk_listener,
212  				  struct sk_buff *skb);
213  struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
214  					       struct sock *sk_listener,
215  					       bool attach_listener);
216  
217  __be32 mptcp_get_reset_option(const struct sk_buff *skb);
218  
mptcp_reset_option(const struct sk_buff * skb)219  static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
220  {
221  	if (skb_ext_exist(skb, SKB_EXT_MPTCP))
222  		return mptcp_get_reset_option(skb);
223  
224  	return htonl(0u);
225  }
226  
227  void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
228  #else
229  
/* CONFIG_MPTCP=n stub: nothing to initialize. */
static inline void mptcp_init(void)
{
}
233  
/* CONFIG_MPTCP=n stub: no socket is ever MPTCP. */
static inline bool sk_is_mptcp(const struct sock *sk)
{
	return false;
}
238  
/* CONFIG_MPTCP=n stub: no request socket is ever MPTCP. */
static inline bool rsk_is_mptcp(const struct request_sock *req)
{
	return false;
}
243  
/* CONFIG_MPTCP=n stub: never ask TCP to drop a request socket. */
static inline bool rsk_drop_req(const struct request_sock *req)
{
	return false;
}
248  
/* CONFIG_MPTCP=n stub: no MPTCP option to add to a SYN. */
static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
				     unsigned int *size,
				     struct mptcp_out_options *opts)
{
	return false;
}
255  
/* CONFIG_MPTCP=n stub: no MPTCP option to add to a SYN-ACK. */
static inline bool mptcp_synack_options(const struct request_sock *req,
					unsigned int *size,
					struct mptcp_out_options *opts)
{
	return false;
}
262  
/* CONFIG_MPTCP=n stub: no MPTCP option on established-state packets. */
static inline bool mptcp_established_options(struct sock *sk,
					     struct sk_buff *skb,
					     unsigned int *size,
					     unsigned int remaining,
					     struct mptcp_out_options *opts)
{
	return false;
}
271  
/* CONFIG_MPTCP=n stub. NOTE(review): returning true presumably means
 * "keep processing the skb" (never drop) - confirm against the
 * CONFIG_MPTCP=y implementation's return contract.
 */
static inline bool mptcp_incoming_options(struct sock *sk,
					  struct sk_buff *skb)
{
	return true;
}
277  
/* CONFIG_MPTCP=n stub: no MPTCP extension to move. */
static inline void mptcp_skb_ext_move(struct sk_buff *to,
				      const struct sk_buff *from)
{
}
282  
/* CONFIG_MPTCP=n stub: no MPTCP extension to copy. */
static inline void mptcp_skb_ext_copy(struct sk_buff *to,
				      struct sk_buff *from)
{
}
287  
/* CONFIG_MPTCP=n stub: without MPTCP mappings, collapsing is always allowed. */
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
					  const struct sk_buff *from)
{
	return true;
}
293  
/* CONFIG_MPTCP=n stub: leave TCP's space/full_space values untouched. */
static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
/* CONFIG_MPTCP=n stub: no seq_file output to produce. */
static inline void mptcp_seq_show(struct seq_file *seq) { }
296  
/* CONFIG_MPTCP=n stub: report success so the syncookie path proceeds as
 * plain TCP.
 */
static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
						const struct sock *sk_listener,
						struct sk_buff *skb)
{
	return 0; /* TCP fallback */
}
303  
/* CONFIG_MPTCP=n stub: no MPTCP request socket can be allocated. */
static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
							     struct sock *sk_listener,
							     bool attach_listener)
{
	return NULL;
}
310  
/* CONFIG_MPTCP=n stub: no reset option, report 0 (network order). */
static inline __be32 mptcp_reset_option(const struct sk_buff *skb)  { return htonl(0u); }
312  
/* CONFIG_MPTCP=n stub: no MPTCP blackhole detection. */
static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
314  #endif /* CONFIG_MPTCP */
315  
316  #if IS_ENABLED(CONFIG_MPTCP_IPV6)
317  int mptcpv6_init(void);
318  void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
319  #elif IS_ENABLED(CONFIG_IPV6)
/* IPv6 enabled but MPTCP-over-IPv6 compiled out: nothing to set up. */
static inline int mptcpv6_init(void) { return 0; }
/* IPv6 enabled but MPTCP-over-IPv6 compiled out: mapped handling is a no-op. */
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
322  #endif
323  
324  #if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
325  struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
326  #else
/* Without MPTCP+BPF support there is never an associated mptcp_sock. */
static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
328  #endif
329  
330  #if !IS_ENABLED(CONFIG_MPTCP)
/* Empty placeholder so code referencing struct mptcp_sock still compiles
 * when MPTCP is disabled.
 */
struct mptcp_sock { };
332  #endif
333  
334  #endif /* __NET_MPTCP_H */
335