/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
#include <net/net_namespace.h>

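/*
 * A drop verdict can embed an error code in the bits above
 * NF_VERDICT_QBITS; extract it back out as a negative errno value.
 */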
static inline int NF_DROP_GETERR(int verdict)
{
        return -(verdict >> NF_VERDICT_QBITS);
}

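/*
 * Free the skb with the given drop reason and fold @err (at most 16 bits)
 * into the upper half of an NF_STOLEN verdict, so an error code can still
 * be propagated to the caller.
 */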
static __always_inline int
NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
{
        BUILD_BUG_ON(err > 0xffff);

        kfree_skb_reason(skb, reason);

        return ((err << 16) | NF_STOLEN);
}

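/* Compare two addresses: all 128 bits must be equal. */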
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
#endif
}

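/* result = a1 & mask, over the full 128-bit address. */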
static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
                                     union nf_inet_addr *result,
                                     const union nf_inet_addr *mask)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ua = (const unsigned long *)a1;
        unsigned long *ur = (unsigned long *)result;
        const unsigned long *um = (const unsigned long *)mask;

        ur[0] = ua[0] & um[0];
        ur[1] = ua[1] & um[1];
#else
        result->all[0] = a1->all[0] & mask->all[0];
        result->all[1] = a1->all[1] & mask->all[1];
        result->all[2] = a1->all[2] & mask->all[2];
        result->all[3] = a1->all[3] & mask->all[3];
#endif
}

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

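/*
 * Per-invocation context handed to every hook function: hook number and
 * protocol family, input/output devices, originating socket, network
 * namespace, and the continuation (okfn) invoked once all hooks accept
 * the packet.
 */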
struct nf_hook_state {
        u8 hook;
        u8 pf;
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
        struct net *net;
        int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
                               struct sk_buff *skb,
                               const struct nf_hook_state *state);

enum nf_hook_ops_type {
        NF_HOOK_OP_UNDEFINED,
        NF_HOOK_OP_NF_TABLES,
        NF_HOOK_OP_BPF,
};

struct nf_hook_ops {
        /* User fills in from here down. */
        nf_hookfn *hook;
        struct net_device *dev;
        void *priv;
        u8 pf;
        enum nf_hook_ops_type hook_ops_type:8;
        unsigned int hooknum;
        /* Hooks are ordered in ascending priority. */
        int priority;
};

struct nf_hook_entry {
        nf_hookfn *hook;
        void *priv;
};

struct nf_hook_entries_rcu_head {
        struct rcu_head head;
        void *allocation;
};

struct nf_hook_entries {
        u16 num_hook_entries;
        /* padding */
        struct nf_hook_entry hooks[];

        /* trailer: pointers to original orig_ops of each hook,
         * followed by rcu_head and scratch space used for freeing
         * the structure via call_rcu.
         *
         * This is not part of struct nf_hook_entry since it's only
         * needed in slow path (hook register/unregister):
         * const struct nf_hook_ops *orig_ops[]
         *
         * For the same reason, we store this at the end -- it's
         * only needed when a hook is deleted, not during
         * packet path processing:
         * struct nf_hook_entries_rcu_head head
         */
};

#ifdef CONFIG_NETFILTER
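/*
 * The original nf_hook_ops pointers are stored in the trailer directly
 * behind hooks[]; see the layout comment in struct nf_hook_entries above.
 */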
static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
        unsigned int n = e->num_hook_entries;
        const void *hook_end;

        hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

        return (struct nf_hook_ops **)hook_end;
}

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
                     struct nf_hook_state *state)
{
        return entry->hook(entry->priv, skb, state);
}

static inline void nf_hook_state_init(struct nf_hook_state *p,
                                      unsigned int hook,
                                      u_int8_t pf,
                                      struct net_device *indev,
                                      struct net_device *outdev,
                                      struct sock *sk,
                                      struct net *net,
                                      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        p->hook = hook;
        p->pf = pf;
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
        p->net = net;
        p->okfn = okfn;
}

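/*
 * Per-family handlers for netfilter socket options, selected by protocol
 * family and option number range; see nf_register_sockopt() below.
 */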
struct nf_sockopt_ops {
        struct list_head list;

        u_int8_t pf;

        /* Non-inclusive ranges: use 0/0/NULL to never get called. */
        int set_optmin;
        int set_optmax;
        int (*set)(struct sock *sk, int optval, sockptr_t arg,
                   unsigned int len);
        int get_optmin;
        int get_optmax;
        int (*get)(struct sock *sk, int optval, void __user *user, int *len);
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n);
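/*
 * Illustrative sketch (not part of this header) of registering a hook at
 * the IPv4 pre-routing point; my_hookfn stands in for a real nf_hookfn
 * implementation:
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(net, &my_ops);
 */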

/* Functions to register get/setsockopt ranges (non-inclusive). You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);

#ifdef CONFIG_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
                 const struct nf_hook_entries *e, unsigned int i);

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
                       const struct nf_hook_entries *e);
/**
 * nf_hook - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass. The function
 * okfn must be invoked by the caller in this case. Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        struct nf_hook_entries *hook_head = NULL;
        int ret = 1;

#ifdef CONFIG_JUMP_LABEL
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook) &&
            !static_key_false(&nf_hooks_needed[pf][hook]))
                return 1;
#endif

        rcu_read_lock();
        switch (pf) {
        case NFPROTO_IPV4:
                hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
                break;
        case NFPROTO_IPV6:
                hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
                break;
        case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
                if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
                        break;
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
                break;
        case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
                hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        if (hook_head) {
                struct nf_hook_state state;

                nf_hook_state_init(&state, hook, pf, indev, outdev,
                                   sk, net, okfn);

                ret = nf_hook_slow(skb, &state, hook_head, 0);
        }
        rcu_read_unlock();

        return ret;
}

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped. Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        int ret;

        if (!cond ||
            ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
                ret = okfn(net, sk, skb);
        return ret;
}

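/*
 * Illustrative sketch (not from this header): roughly how the IPv4
 * receive path runs the PRE_ROUTING hook, with ip_rcv_finish() as the
 * okfn continuation:
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
 *		       net, NULL, skb, dev, NULL, ip_rcv_finish);
 */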
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
        struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
        if (ret == 1)
                ret = okfn(net, sk, skb);
        return ret;
}

static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct list_head *head, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        struct nf_hook_entries *hook_head = NULL;

#ifdef CONFIG_JUMP_LABEL
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook) &&
            !static_key_false(&nf_hooks_needed[pf][hook]))
                return;
#endif

        rcu_read_lock();
        switch (pf) {
        case NFPROTO_IPV4:
                hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
                break;
        case NFPROTO_IPV6:
                hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        if (hook_head) {
                struct nf_hook_state state;

                nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn);

                nf_hook_slow_list(head, &state, hook_head);
        }
        rcu_read_unlock();
}

/* Call the set/getsockopt handlers registered via nf_register_sockopt(). */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
                  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);

struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
                    unsigned int dataoff, u_int8_t protocol,
                    unsigned short family);

__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
                            unsigned int dataoff, unsigned int len,
                            u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
             bool strict, unsigned short family);

#include <net/flow.h>

struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;

struct nf_nat_hook {
        int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
                               const struct nlattr *attr);
        void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
        void (*remove_nat_bysrc)(struct nf_conn *ct);
};

extern const struct nf_nat_hook __rcu *nf_nat_hook;

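/*
 * If the NAT core is loaded, let it rewrite @fl according to the skb's
 * NAT bindings; a no-op otherwise.
 */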
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#if IS_ENABLED(CONFIG_NF_NAT)
        const struct nf_nat_hook *nat_hook;

        rcu_read_lock();
        nat_hook = rcu_dereference(nf_nat_hook);
        if (nat_hook && nat_hook->decode_session)
                nat_hook->decode_session(skb, fl);
        rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        struct sk_buff *skb, struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return okfn(net, sk, skb);
}

static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct list_head *head, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        /* nothing to do */
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return 1;
}

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /* CONFIG_NETFILTER */

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>

void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                         const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                                       const struct sk_buff *skb)
{
        return false;
}
#endif

struct nf_conn;
enum ip_conntrack_info;

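/*
 * Indirection to the conntrack core, so callers built without direct
 * access to nf_conntrack can still attach, confirm or destroy entries.
 */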
struct nf_ct_hook {
        int (*update)(struct net *net, struct sk_buff *skb);
        void (*destroy)(struct nf_conntrack *);
        bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
                              const struct sk_buff *);
        void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
        void (*set_closing)(struct nf_conntrack *nfct);
        int (*confirm)(struct sk_buff *skb);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;

struct nlattr;

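/*
 * Hooks used by nfnetlink (e.g. the queue subsystem) to build and parse
 * conntrack attributes for a packet's connection.
 */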
struct nfnl_ct_hook {
        size_t (*build_size)(const struct nf_conn *ct);
        int (*build)(struct sk_buff *skb, struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     u_int16_t ct_attr, u_int16_t ct_info_attr);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
        int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
                             u32 portid, u32 report);
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo, s32 off);
};
extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;

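/*
 * Per-netns enable/disable of IPv4/IPv6 defragmentation; the owner
 * module is pinned while defragmentation is in use.
 */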
struct nf_defrag_hook {
        struct module *owner;
        int (*enable)(struct net *net);
        void (*disable)(struct net *net);
};

extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;

/*
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb
 * from being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);

/*
 * Contains bitmask of ctnetlink event subscribers, if any.
 * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
 */
extern u8 nf_ctnetlink_has_listener;
#endif /* __LINUX_NETFILTER_H */