1 // SPDX-License-Identifier: GPL-2.0-only
2 /* xfrm_user.c: User interface to configure xfrm engine.
3 *
4 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
5 *
6 * Changes:
7 * Mitsuru KANDA @USAGI
8 * Kazunori MIYAZAWA @USAGI
9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10 * IPv6 support
11 *
12 */
13
14 #include <linux/compat.h>
15 #include <linux/crypto.h>
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/slab.h>
20 #include <linux/socket.h>
21 #include <linux/string.h>
22 #include <linux/net.h>
23 #include <linux/skbuff.h>
24 #include <linux/pfkeyv2.h>
25 #include <linux/ipsec.h>
26 #include <linux/init.h>
27 #include <linux/security.h>
28 #include <net/sock.h>
29 #include <net/xfrm.h>
30 #include <net/netlink.h>
31 #include <net/ah.h>
32 #include <linux/uaccess.h>
33 #if IS_ENABLED(CONFIG_IPV6)
34 #include <linux/in6.h>
35 #endif
36 #include <linux/unaligned.h>
37
/* Validate one variable-length algorithm attribute (AUTH, CRYPT or COMP).
 *
 * The attribute payload is a struct xfrm_algo followed by alg_key_len bits
 * of key material; reject payloads shorter than what the header declares
 * so later key copies cannot read past the attribute.  An absent attribute
 * is not an error.  Returns 0 or -EINVAL (with extack set).
 */
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[type];
	struct xfrm_algo *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	/* xfrm_alg_len() accounts for the user-declared key length. */
	if (nla_len(rt) < (int)xfrm_alg_len(algp)) {
		NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length");
		return -EINVAL;
	}

	switch (type) {
	case XFRMA_ALG_AUTH:
	case XFRMA_ALG_CRYPT:
	case XFRMA_ALG_COMP:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type");
		return -EINVAL;
	}

	/* Force NUL termination of the user-supplied algorithm name. */
	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
	return 0;
}
67
/* Validate the XFRMA_ALG_AUTH_TRUNC attribute (auth algorithm with an
 * explicit truncated-ICV length).  As in verify_one_alg(), the payload
 * must cover the key bits declared in the header, and the algorithm
 * name is forcibly NUL-terminated.  Absent attribute is fine.
 */
static int verify_auth_trunc(struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
	struct xfrm_algo_auth *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) {
		NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length");
		return -EINVAL;
	}

	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
	return 0;
}
86
verify_aead(struct nlattr ** attrs,struct netlink_ext_ack * extack)87 static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack)
88 {
89 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
90 struct xfrm_algo_aead *algp;
91
92 if (!rt)
93 return 0;
94
95 algp = nla_data(rt);
96 if (nla_len(rt) < (int)aead_len(algp)) {
97 NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length");
98 return -EINVAL;
99 }
100
101 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
102 return 0;
103 }
104
/* If the given address attribute is present, point *addrp at its payload.
 * No length validation is performed here; callers rely on earlier netlink
 * policy checks.  A NULL addrp or missing attribute leaves *addrp alone.
 */
static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *nla = attrs[type];

	if (!nla || !addrp)
		return;

	*addrp = nla_data(nla);
}
113
/* Validate the XFRMA_SEC_CTX attribute: the embedded length field must not
 * exceed the netlink payload, and must equal exactly the header size plus
 * the declared context string length (no slack allowed).  Absent attribute
 * is not an error.  Returns 0 or -EINVAL with extack set.
 */
static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	if (uctx->len > nla_len(rt) ||
	    uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) {
		NL_SET_ERR_MSG(extack, "Invalid security context length");
		return -EINVAL;
	}

	return 0;
}
131
/* Validate the XFRMA_REPLAY_ESN_VAL attribute for a new SA.
 *
 * Checks, in order: the attribute must be present when XFRM_STATE_ESN is
 * requested; the bitmap length must not exceed the uAPI maximum; the
 * payload must either cover the full declared bitmap or be exactly the
 * bare structure (legacy userspace); ESN is only valid for ESP/AH and is
 * incompatible with the legacy replay_window; and direction-specific
 * fields (rx state on an output SA, tx state on an input SA) must be zero.
 *
 * Fix vs. previous revision: the bmp_len extack message read
 * "should 0" instead of "should be 0".
 *
 * Returns 0 or -EINVAL with extack set.
 */
static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs, u8 sa_dir,
				struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
	struct xfrm_replay_state_esn *rs;

	if (!rt) {
		if (p->flags & XFRM_STATE_ESN) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for ESN");
			return -EINVAL;
		}
		return 0;
	}

	rs = nla_data(rt);

	/* bmp_len is in 32-bit words; cap it so the bitmap fits the uAPI
	 * maximum and later multiplications cannot overflow.
	 */
	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) {
		NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128");
		return -EINVAL;
	}

	/* Accept either a payload covering the full bitmap, or exactly the
	 * bare struct (bitmap omitted) for backwards compatibility.
	 */
	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
	    nla_len(rt) != sizeof(*rs)) {
		NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length");
		return -EINVAL;
	}

	/* As only ESP and AH support ESN feature. */
	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) {
		NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH");
		return -EINVAL;
	}

	if (p->replay_window != 0) {
		NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window");
		return -EINVAL;
	}

	if (sa_dir == XFRM_SA_DIR_OUT) {
		if (rs->replay_window) {
			NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
			return -EINVAL;
		}
		if (rs->seq || rs->seq_hi) {
			NL_SET_ERR_MSG(extack,
				       "Replay seq and seq_hi should be 0 for output SA");
			return -EINVAL;
		}
		if (rs->bmp_len) {
			NL_SET_ERR_MSG(extack, "Replay bmp_len should be 0 for output SA");
			return -EINVAL;
		}
	}

	if (sa_dir == XFRM_SA_DIR_IN) {
		if (rs->oseq || rs->oseq_hi) {
			NL_SET_ERR_MSG(extack,
				       "Replay oseq and oseq_hi should be 0 for input SA");
			return -EINVAL;
		}
	}

	return 0;
}
197
/* Validate a new-SA request (XFRM_MSG_NEWSA / XFRM_MSG_UPDSA) before any
 * state is allocated: address family, selector prefix lengths, the
 * attribute set required/forbidden per IPsec protocol, per-algorithm
 * attribute lengths, mode, and direction-specific restrictions on flags
 * and replay state.  Returns 0 or a negative errno with extack set.
 */
static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	int err;
	/* Optional SA direction attribute; 0 means "not specified". */
	u8 sa_dir = attrs[XFRMA_SA_DIR] ? nla_get_u8(attrs[XFRMA_SA_DIR]) : 0;
	u16 family = p->sel.family;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		goto out;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	/* An empty selector inherits the SA's family unless the state is
	 * explicitly marked family-unspecific.
	 */
	if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		family = p->family;

	switch (family) {
	case AF_UNSPEC:
		break;

	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			goto out;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			goto out;
		}

		break;
#else
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid address family in selector");
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		/* AH is authentication-only: require one auth algorithm and
		 * forbid encryption/compression/TFC-padding attributes.
		 */
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH");
			goto out;
		}

		if (attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD");
			goto out;
		}
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP]) {
			NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP");
			goto out;
		}

		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD");
			goto out;
		}

		/* AEAD covers both confidentiality and integrity, so it is
		 * mutually exclusive with separate auth/crypt algorithms.
		 */
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT");
			goto out;
		}

		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL) {
			NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode");
			goto out;
		}
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP");
			goto out;
		}

		if (attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD");
			goto out;
		}

		/* IPcomp CPIs are 16 bits wide, so the SPI must fit. */
		if (ntohl(p->id.spi) >= 0x10000) {
			NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)");
			goto out;
		}
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		/* Mobile IPv6 states: only a care-of address is valid,
		 * never algorithm/encap/security-context attributes.
		 */
		if (attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ENCAP] ||
		    attrs[XFRMA_SEC_CTX] ||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING");
			goto out;
		}

		if (!attrs[XFRMA_COADDR]) {
			NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING");
			goto out;
		}
		break;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Unsupported protocol");
		goto out;
	}

	/* Per-attribute length validation; each helper sets extack itself. */
	if ((err = verify_aead(attrs, extack)))
		goto out;
	if ((err = verify_auth_trunc(attrs, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs, extack)))
		goto out;
	if ((err = verify_replay(p, attrs, sa_dir, extack)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Unsupported mode");
		goto out;
	}

	err = 0;

	/* Mapping-change timer only makes sense for encapsulated input SAs. */
	if (attrs[XFRMA_MTIMER_THRESH]) {
		if (!attrs[XFRMA_ENCAP]) {
			NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states");
			err = -EINVAL;
			goto out;
		}

		if (sa_dir == XFRM_SA_DIR_OUT) {
			NL_SET_ERR_MSG(extack,
				       "MTIMER_THRESH attribute should not be set on output SA");
			err = -EINVAL;
			goto out;
		}
	}

	/* Output SAs must not carry receive-side flags or replay rx state. */
	if (sa_dir == XFRM_SA_DIR_OUT) {
		if (p->flags & XFRM_STATE_DECAP_DSCP) {
			NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->flags & XFRM_STATE_ICMP) {
			NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->flags & XFRM_STATE_WILDRECV) {
			NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->replay_window) {
			NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_REPLAY_VAL]) {
			struct xfrm_replay_state *replay;

			replay = nla_data(attrs[XFRMA_REPLAY_VAL]);

			if (replay->seq || replay->bitmap) {
				NL_SET_ERR_MSG(extack,
					       "Replay seq and bitmap should be 0 for output SA");
				err = -EINVAL;
				goto out;
			}
		}
	}

	/* Input SAs must not carry transmit-side flags. */
	if (sa_dir == XFRM_SA_DIR_IN) {
		if (p->flags & XFRM_STATE_NOPMTUDISC) {
			NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_SA_EXTRA_FLAGS]) {
			u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

			if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) {
				NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA");
				err = -EINVAL;
				goto out;
			}

			if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) {
				NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA");
				err = -EINVAL;
				goto out;
			}

		}
	}

out:
	return err;
}
466
/* Generic single-algorithm attach: look the user-named algorithm up via
 * get_byname (loading modules if needed, hence the "1" probe argument),
 * record its pfkey id in *props, and hang a kernel copy of the user
 * struct (with a canonicalized name) off *algpp.
 *
 * NOTE(review): the extack message is COMP-specific — this helper appears
 * to be used only for XFRMA_ALG_COMP; confirm against callers.
 *
 * Returns 0 (also when rta is absent), -ENOSYS for an unknown algorithm,
 * or -ENOMEM.  Caller owns *algpp.
 */
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
			   struct nlattr *rta, struct netlink_ext_ack *extack)
{
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = get_byname(ualg->alg_name, 1);
	if (!algo) {
		NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found");
		return -ENOSYS;
	}
	*props = algo->desc.sadb_alg_id;

	/* Copy header + key bits; length was validated by verify_one_alg(). */
	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* Replace the user string with the canonical algorithm name. */
	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}
494
/* Attach an encryption (CRYPT) algorithm to the state: resolve the name,
 * record the pfkey id in x->props.ealgo, duplicate the user struct into
 * x->ealg, and remember the IV generator name.  Returns 0 (also when rta
 * is absent), -ENOSYS for an unknown algorithm, or -ENOMEM.
 */
static int attach_crypt(struct xfrm_state *x, struct nlattr *rta,
			struct netlink_ext_ack *extack)
{
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
	if (!algo) {
		NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found");
		return -ENOSYS;
	}
	x->props.ealgo = algo->desc.sadb_alg_id;

	/* Length was validated earlier by verify_one_alg(). */
	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	x->ealg = p;
	x->geniv = algo->uinfo.encr.geniv;
	return 0;
}
522
/* Attach an auth algorithm given in the legacy XFRMA_ALG_AUTH format
 * (struct xfrm_algo, no truncation length).  Converts it to the internal
 * struct xfrm_algo_auth, taking the truncated-ICV length from the
 * algorithm descriptor's default.  Returns 0 (also when rta is absent),
 * -ENOSYS for an unknown algorithm, or -ENOMEM.  Caller owns *algpp.
 */
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
		       struct nlattr *rta, struct netlink_ext_ack *extack)
{
	struct xfrm_algo *ualg;
	struct xfrm_algo_auth *p;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo) {
		NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found");
		return -ENOSYS;
	}
	*props = algo->desc.sadb_alg_id;

	/* kmalloc (not kzalloc) is fine: every field is assigned below.
	 * alg_key_len is in bits; round up to whole bytes for the copy.
	 */
	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	p->alg_key_len = ualg->alg_key_len;
	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);

	*algpp = p;
	return 0;
}
554
/* Attach an auth algorithm given in the XFRMA_ALG_AUTH_TRUNC format,
 * which carries an explicit truncated-ICV length.  The requested
 * truncation must not exceed the algorithm's full ICV width; a zero
 * truncation falls back to the descriptor's default.  Returns 0 (also
 * when rta is absent), -ENOSYS, -EINVAL, or -ENOMEM.  Caller owns *algpp.
 */
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
			     struct nlattr *rta, struct netlink_ext_ack *extack)
{
	struct xfrm_algo_auth *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo) {
		NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found");
		return -ENOSYS;
	}
	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) {
		NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV");
		return -EINVAL;
	}
	*props = algo->desc.sadb_alg_id;

	/* Length was validated earlier by verify_auth_trunc(). */
	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	if (!p->alg_trunc_len)
		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;

	*algpp = p;
	return 0;
}
588
/* Attach an AEAD algorithm to the state.  The lookup keys on both the
 * name and the requested ICV length, since one AEAD name may exist with
 * several ICV sizes.  Records the pfkey id in x->props.ealgo, duplicates
 * the user struct into x->aead, and stores the IV generator name.
 * Returns 0 (also when rta is absent), -ENOSYS, or -ENOMEM.
 */
static int attach_aead(struct xfrm_state *x, struct nlattr *rta,
		       struct netlink_ext_ack *extack)
{
	struct xfrm_algo_aead *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
	if (!algo) {
		NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found");
		return -ENOSYS;
	}
	x->props.ealgo = algo->desc.sadb_alg_id;

	/* Length was validated earlier by verify_aead(). */
	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	x->aead = p;
	x->geniv = algo->uinfo.aead.geniv;
	return 0;
}
616
/* Validate an ESN replay-state update (XFRM_MSG_NEWAE path) against the
 * state's existing ESN configuration: the attribute must fully cover the
 * bitmap it declares, its total size and bmp_len must match the existing
 * SA exactly (resizing is not allowed), and the replay window must fit
 * inside the bitmap.  A missing attribute or a non-ESN state is fine.
 * Returns 0 or -EINVAL with extack set.
 */
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp,
					 struct netlink_ext_ack *extack)
{
	struct xfrm_replay_state_esn *up;
	unsigned int ulen;

	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);
	ulen = xfrm_replay_state_esn_len(up);

	/* Check the overall length and the internal bitmap length to avoid
	 * potential overflow. */
	if (nla_len(rp) < (int)ulen) {
		NL_SET_ERR_MSG(extack, "ESN attribute is too short");
		return -EINVAL;
	}

	if (xfrm_replay_state_esn_len(replay_esn) != ulen) {
		NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size");
		return -EINVAL;
	}

	if (replay_esn->bmp_len != up->bmp_len) {
		NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap");
		return -EINVAL;
	}

	/* bmp_len counts 32-bit words; the window is in bits. */
	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) {
		NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap");
		return -EINVAL;
	}

	return 0;
}
654
/* Allocate the state's replay_esn and preplay_esn from the user-supplied
 * XFRMA_REPLAY_ESN_VAL attribute.  Both are sized for the full declared
 * bitmap (klen); if userspace sent only the bare struct without the
 * bitmap (legacy format accepted by verify_replay()), just the header is
 * copied and the bitmap stays zeroed.  Returns 0 (also when rta is
 * absent) or -ENOMEM; on failure nothing is leaked.
 */
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;
	unsigned int klen, ulen;

	if (!rta)
		return 0;

	up = nla_data(rta);
	klen = xfrm_replay_state_esn_len(up);
	/* Copy the full payload if present, else only the bare struct. */
	ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);

	p = kzalloc(klen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kzalloc(klen, GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	memcpy(p, up, ulen);
	memcpy(pp, up, ulen);

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}
687
xfrm_user_sec_ctx_size(struct xfrm_sec_ctx * xfrm_ctx)688 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
689 {
690 unsigned int len = 0;
691
692 if (xfrm_ctx) {
693 len += sizeof(struct xfrm_user_sec_ctx);
694 len += xfrm_ctx->ctx_len;
695 }
696 return len;
697 }
698
/* Copy the fixed part of a userspace SA description into a fresh
 * xfrm_state.  The legacy replay window is clamped to the size of the
 * in-kernel bitmap, and an empty selector family inherits the SA family
 * unless the state is explicitly family-unspecific.
 */
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	/* Clamp to the legacy bitmap width (bits). */
	x->props.replay_window = min_t(unsigned int, p->replay_window,
				       sizeof(x->replay.bitmap) * 8);
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}
715
716 /*
717 * someday when pfkey also has support, we could have the code
718 * somehow made shareable and move it to xfrm_state.c - JHS
719 *
720 */
xfrm_update_ae_params(struct xfrm_state * x,struct nlattr ** attrs,int update_esn)721 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
722 int update_esn)
723 {
724 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
725 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
726 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
727 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
728 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
729 struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
730
731 if (re && x->replay_esn && x->preplay_esn) {
732 struct xfrm_replay_state_esn *replay_esn;
733 replay_esn = nla_data(re);
734 memcpy(x->replay_esn, replay_esn,
735 xfrm_replay_state_esn_len(replay_esn));
736 memcpy(x->preplay_esn, replay_esn,
737 xfrm_replay_state_esn_len(replay_esn));
738 }
739
740 if (rp) {
741 struct xfrm_replay_state *replay;
742 replay = nla_data(rp);
743 memcpy(&x->replay, replay, sizeof(*replay));
744 memcpy(&x->preplay, replay, sizeof(*replay));
745 }
746
747 if (lt) {
748 struct xfrm_lifetime_cur *ltime;
749 ltime = nla_data(lt);
750 x->curlft.bytes = ltime->bytes;
751 x->curlft.packets = ltime->packets;
752 x->curlft.add_time = ltime->add_time;
753 x->curlft.use_time = ltime->use_time;
754 }
755
756 if (et)
757 x->replay_maxage = nla_get_u32(et);
758
759 if (rt)
760 x->replay_maxdiff = nla_get_u32(rt);
761
762 if (mt)
763 x->mapping_maxage = nla_get_u32(mt);
764 }
765
xfrm_smark_init(struct nlattr ** attrs,struct xfrm_mark * m)766 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
767 {
768 if (attrs[XFRMA_SET_MARK]) {
769 m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
770 if (attrs[XFRMA_SET_MARK_MASK])
771 m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
772 else
773 m->m = 0xffffffff;
774 } else {
775 m->v = m->m = 0;
776 }
777 }
778
/* Build a fully-initialized xfrm_state from a validated NEWSA/UPDSA
 * request: copy the fixed info, attach optional encap/coaddr/algorithm
 * attributes, set marks and ids, run generic state init, allocate the
 * security context and ESN buffers, seed replay parameters from sysctls,
 * apply attribute overrides, and finally program hardware offload.
 *
 * Returns the new state with one reference held, or NULL with *errp set;
 * on failure the partially-built state is marked DEAD and released so
 * all attached allocations are freed by the state destructor.
 */
static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp,
					       struct netlink_ext_ack *extack)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	if (attrs[XFRMA_SA_EXTRA_FLAGS])
		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

	if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack)))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC], extack)))
		goto error;
	/* Legacy AUTH attribute only applies if AUTH_TRUNC didn't set one. */
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH], extack)))
			goto error;
	}
	if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack)))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP], extack)))
		goto error;

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	xfrm_mark_get(attrs, &x->mark);

	xfrm_smark_init(attrs, &x->props.smark);

	if (attrs[XFRMA_IF_ID])
		x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL])
		x->nat_keepalive_interval =
			nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]);

	err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX]) {
		err = security_xfrm_state_alloc(x,
						nla_data(attrs[XFRMA_SEC_CTX]));
		if (err)
			goto error;
	}

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x, extack)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs, 0);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_state_add(net, x,
					 nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					 extack);
		if (err)
			goto error;
	}

	return x;

error:
	/* Mark DEAD so the final put destroys everything attached so far. */
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}
888
/* Netlink handler for XFRM_MSG_NEWSA / XFRM_MSG_UPDSA: validate the
 * request, construct the state, insert (NEWSA) or update (UPDSA) it,
 * audit the result, and notify km listeners on success.  Returns 0 or a
 * negative errno.
 */
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;

	err = verify_newsa_info(p, attrs, extack);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err, extack);
	if (!x)
		return err;

	/* Extra hold so the state survives until the final put below. */
	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	xfrm_audit_state_add(x, err ? 0 : 1, true);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		xfrm_dev_state_delete(x);
		/* Drop the construct reference; the hold above is dropped
		 * at out.
		 */
		__xfrm_state_put(x);
		goto out;
	}

	if (x->km.state == XFRM_STATE_VOID)
		x->km.state = XFRM_STATE_VALID;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}
933
/* Resolve the SA referenced by a usersa_id: by (daddr, spi, proto) for
 * protocols that carry an SPI, otherwise by (daddr, saddr, proto) using
 * the XFRMA_SRCADDR attribute, which is then mandatory.  Returns the
 * state with a reference held, or NULL with *errp set (-EINVAL for a
 * missing source address, -ESRCH when no state matches).
 */
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

out:
	if (!x && errp)
		*errp = err;
	return x;
}
967
/* Netlink handler for XFRM_MSG_DELSA: look the SA up, check the LSM
 * delete hook and that the state is not kernel-owned (used by tunnels),
 * delete it, and notify km listeners.  The deletion is audited on every
 * exit path, success or failure.  Returns 0 or a negative errno.
 */
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	if (xfrm_state_kern(x)) {
		NL_SET_ERR_MSG(extack, "SA is in use by tunnels");
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);
	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	xfrm_audit_state_delete(x, err ? 0 : 1, true);
	xfrm_state_put(x);
	return err;
}
1004
/* Fill a userspace xfrm_usersa_info from the kernel state, refreshing
 * offload statistics first when the SA lives on a device.  The structure
 * is zeroed up front so no kernel memory leaks through padding.
 */
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	if (x->xso.dev)
		xfrm_dev_state_update_stats(x);
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	/* put_unaligned: p->stats may not be naturally aligned in the
	 * destination buffer — presumably due to the uAPI struct layout;
	 * NOTE(review) confirm against struct xfrm_usersa_info packing.
	 */
	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
	put_unaligned(x->stats.replay, &p->stats.replay);
	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}
1025
/* Per-dump context passed through netlink dump callbacks.
 * NOTE(review): field roles inferred from names — in_skb appears to be
 * the request skb, out_skb the reply being filled, and nlmsg_seq/flags
 * the values echoed into each emitted message; confirm against callers.
 */
struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};
1032
/* Emit the state's security context as an XFRMA_SEC_CTX attribute:
 * a struct xfrm_user_sec_ctx header immediately followed by the context
 * string.  Returns 0 or -EMSGSIZE when the skb has no room.
 */
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	/* Context string lives directly after the header. */
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}
1053
/* Emit the state's hardware-offload configuration as an
 * XFRMA_OFFLOAD_DEV attribute: device ifindex plus direction/type flags.
 * Returns 0 or -EMSGSIZE when the skb has no room.
 */
static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
{
	struct xfrm_user_offload *xuo;
	struct nlattr *attr;

	attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
	if (attr == NULL)
		return -EMSGSIZE;

	xuo = nla_data(attr);
	memset(xuo, 0, sizeof(*xuo));
	xuo->ifindex = xso->dev->ifindex;
	if (xso->dir == XFRM_DEV_OFFLOAD_IN)
		xuo->flags = XFRM_OFFLOAD_INBOUND;
	if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
		xuo->flags |= XFRM_OFFLOAD_PACKET;

	return 0;
}
1073
xfrm_redact(void)1074 static bool xfrm_redact(void)
1075 {
1076 return IS_ENABLED(CONFIG_SECURITY) &&
1077 security_locked_down(LOCKDOWN_XFRM_SECRET);
1078 }
1079
copy_to_user_auth(struct xfrm_algo_auth * auth,struct sk_buff * skb)1080 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
1081 {
1082 struct xfrm_algo *algo;
1083 struct xfrm_algo_auth *ap;
1084 struct nlattr *nla;
1085 bool redact_secret = xfrm_redact();
1086
1087 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
1088 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
1089 if (!nla)
1090 return -EMSGSIZE;
1091 algo = nla_data(nla);
1092 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
1093
1094 if (redact_secret && auth->alg_key_len)
1095 memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
1096 else
1097 memcpy(algo->alg_key, auth->alg_key,
1098 (auth->alg_key_len + 7) / 8);
1099 algo->alg_key_len = auth->alg_key_len;
1100
1101 nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
1102 if (!nla)
1103 return -EMSGSIZE;
1104 ap = nla_data(nla);
1105 strscpy_pad(ap->alg_name, auth->alg_name, sizeof(ap->alg_name));
1106 ap->alg_key_len = auth->alg_key_len;
1107 ap->alg_trunc_len = auth->alg_trunc_len;
1108 if (redact_secret && auth->alg_key_len)
1109 memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
1110 else
1111 memcpy(ap->alg_key, auth->alg_key,
1112 (auth->alg_key_len + 7) / 8);
1113 return 0;
1114 }
1115
/* Emit the AEAD algorithm as an XFRMA_ALG_AEAD attribute, zeroing the
 * key material instead of copying it when the kernel is locked down for
 * XFRM secrets.  Returns 0 or -EMSGSIZE when the skb has no room.
 */
static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
{
	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
	struct xfrm_algo_aead *ap;
	bool redact_secret = xfrm_redact();

	if (!nla)
		return -EMSGSIZE;

	ap = nla_data(nla);
	strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
	ap->alg_key_len = aead->alg_key_len;
	ap->alg_icv_len = aead->alg_icv_len;

	if (redact_secret && aead->alg_key_len)
		memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
	else
		memcpy(ap->alg_key, aead->alg_key,
		       (aead->alg_key_len + 7) / 8);
	return 0;
}
1137
copy_to_user_ealg(struct xfrm_algo * ealg,struct sk_buff * skb)1138 static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
1139 {
1140 struct xfrm_algo *ap;
1141 bool redact_secret = xfrm_redact();
1142 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
1143 xfrm_alg_len(ealg));
1144 if (!nla)
1145 return -EMSGSIZE;
1146
1147 ap = nla_data(nla);
1148 strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
1149 ap->alg_key_len = ealg->alg_key_len;
1150
1151 if (redact_secret && ealg->alg_key_len)
1152 memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
1153 else
1154 memcpy(ap->alg_key, ealg->alg_key,
1155 (ealg->alg_key_len + 7) / 8);
1156
1157 return 0;
1158 }
1159
copy_to_user_calg(struct xfrm_algo * calg,struct sk_buff * skb)1160 static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
1161 {
1162 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
1163 struct xfrm_algo *ap;
1164
1165 if (!nla)
1166 return -EMSGSIZE;
1167
1168 ap = nla_data(nla);
1169 strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
1170 ap->alg_key_len = 0;
1171
1172 return 0;
1173 }
1174
copy_to_user_encap(struct xfrm_encap_tmpl * ep,struct sk_buff * skb)1175 static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
1176 {
1177 struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
1178 struct xfrm_encap_tmpl *uep;
1179
1180 if (!nla)
1181 return -EMSGSIZE;
1182
1183 uep = nla_data(nla);
1184 memset(uep, 0, sizeof(*uep));
1185
1186 uep->encap_type = ep->encap_type;
1187 uep->encap_sport = ep->encap_sport;
1188 uep->encap_dport = ep->encap_dport;
1189 uep->encap_oa = ep->encap_oa;
1190
1191 return 0;
1192 }
1193
xfrm_smark_put(struct sk_buff * skb,struct xfrm_mark * m)1194 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
1195 {
1196 int ret = 0;
1197
1198 if (m->v | m->m) {
1199 ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
1200 if (!ret)
1201 ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
1202 }
1203 return ret;
1204 }
1205
1206 /* Don't change this without updating xfrm_sa_len! */
copy_to_user_state_extra(struct xfrm_state * x,struct xfrm_usersa_info * p,struct sk_buff * skb)1207 static int copy_to_user_state_extra(struct xfrm_state *x,
1208 struct xfrm_usersa_info *p,
1209 struct sk_buff *skb)
1210 {
1211 int ret = 0;
1212
1213 copy_to_user_state(x, p);
1214
1215 if (x->props.extra_flags) {
1216 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
1217 x->props.extra_flags);
1218 if (ret)
1219 goto out;
1220 }
1221
1222 if (x->coaddr) {
1223 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
1224 if (ret)
1225 goto out;
1226 }
1227 if (x->lastused) {
1228 ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
1229 XFRMA_PAD);
1230 if (ret)
1231 goto out;
1232 }
1233 if (x->aead) {
1234 ret = copy_to_user_aead(x->aead, skb);
1235 if (ret)
1236 goto out;
1237 }
1238 if (x->aalg) {
1239 ret = copy_to_user_auth(x->aalg, skb);
1240 if (ret)
1241 goto out;
1242 }
1243 if (x->ealg) {
1244 ret = copy_to_user_ealg(x->ealg, skb);
1245 if (ret)
1246 goto out;
1247 }
1248 if (x->calg) {
1249 ret = copy_to_user_calg(x->calg, skb);
1250 if (ret)
1251 goto out;
1252 }
1253 if (x->encap) {
1254 ret = copy_to_user_encap(x->encap, skb);
1255 if (ret)
1256 goto out;
1257 }
1258 if (x->tfcpad) {
1259 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
1260 if (ret)
1261 goto out;
1262 }
1263 ret = xfrm_mark_put(skb, &x->mark);
1264 if (ret)
1265 goto out;
1266
1267 ret = xfrm_smark_put(skb, &x->props.smark);
1268 if (ret)
1269 goto out;
1270
1271 if (x->replay_esn)
1272 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1273 xfrm_replay_state_esn_len(x->replay_esn),
1274 x->replay_esn);
1275 else
1276 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1277 &x->replay);
1278 if (ret)
1279 goto out;
1280 if(x->xso.dev)
1281 ret = copy_user_offload(&x->xso, skb);
1282 if (ret)
1283 goto out;
1284 if (x->if_id) {
1285 ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
1286 if (ret)
1287 goto out;
1288 }
1289 if (x->security) {
1290 ret = copy_sec_ctx(x->security, skb);
1291 if (ret)
1292 goto out;
1293 }
1294 if (x->mapping_maxage) {
1295 ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
1296 if (ret)
1297 goto out;
1298 }
1299 if (x->dir)
1300 ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
1301
1302 if (x->nat_keepalive_interval) {
1303 ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL,
1304 x->nat_keepalive_interval);
1305 if (ret)
1306 goto out;
1307 }
1308 out:
1309 return ret;
1310 }
1311
dump_one_state(struct xfrm_state * x,int count,void * ptr)1312 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
1313 {
1314 struct xfrm_dump_info *sp = ptr;
1315 struct sk_buff *in_skb = sp->in_skb;
1316 struct sk_buff *skb = sp->out_skb;
1317 struct xfrm_translator *xtr;
1318 struct xfrm_usersa_info *p;
1319 struct nlmsghdr *nlh;
1320 int err;
1321
1322 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1323 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
1324 if (nlh == NULL)
1325 return -EMSGSIZE;
1326
1327 p = nlmsg_data(nlh);
1328
1329 err = copy_to_user_state_extra(x, p, skb);
1330 if (err) {
1331 nlmsg_cancel(skb, nlh);
1332 return err;
1333 }
1334 nlmsg_end(skb, nlh);
1335
1336 xtr = xfrm_get_translator();
1337 if (xtr) {
1338 err = xtr->alloc_compat(skb, nlh);
1339
1340 xfrm_put_translator(xtr);
1341 if (err) {
1342 nlmsg_cancel(skb, nlh);
1343 return err;
1344 }
1345 }
1346
1347 return 0;
1348 }
1349
xfrm_dump_sa_done(struct netlink_callback * cb)1350 static int xfrm_dump_sa_done(struct netlink_callback *cb)
1351 {
1352 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
1353 struct sock *sk = cb->skb->sk;
1354 struct net *net = sock_net(sk);
1355
1356 if (cb->args[0])
1357 xfrm_state_walk_done(walk, net);
1358 return 0;
1359 }
1360
/* XFRM_MSG_GETSA dump handler.
 *
 * First invocation (cb->args[0] == 0): parse the optional
 * XFRMA_ADDRESS_FILTER and XFRMA_PROTO attributes and initialize a
 * state walker in cb->args[1..]; later invocations resume that walk.
 * Ownership of the kmemdup'ed filter passes to the walker —
 * presumably released during walk teardown (see xfrm_dump_sa_done());
 * confirm in xfrm_state_walk_done().
 */
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	/* The walker state must fit in cb->args[] after the flag word. */
	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		struct nlattr *attrs[XFRMA_MAX+1];
		struct xfrm_address_filter *filter = NULL;
		u8 proto = 0;
		int err;

		err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
					     xfrma_policy, cb->extack);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
					 sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;

			/* see addr_match(), (prefix length >> 5) << 2
			 * will be used to compare xfrm_address_t
			 */
			if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
			    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
				kfree(filter);
				return -EINVAL;
			}
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		xfrm_state_walk_init(walk, proto, filter);
		cb->args[0] = 1;
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}
1413
/* Build a freshly allocated skb containing a single XFRM_MSG_NEWSA
 * message describing @x.  Returns the skb or an ERR_PTR().
 */
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info = {
		.in_skb = in_skb,
		.nlmsg_seq = seq,
		.nlmsg_flags = 0,
	};
	struct sk_buff *out;
	int err;

	out = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!out)
		return ERR_PTR(-ENOMEM);

	info.out_skb = out;

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(out);
		return ERR_PTR(err);
	}

	return out;
}
1438
1439 /* A wrapper for nlmsg_multicast() checking that nlsk is still available.
1440 * Must be called with RCU read lock.
1441 */
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
				       u32 pid, unsigned int group)
{
	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
	struct xfrm_translator *xtr;
	int err;

	/* The netlink socket may already be gone on netns teardown;
	 * consume the skb in every failure path.
	 */
	if (!nlsk) {
		kfree_skb(skb);
		return -EPIPE;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}

	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
1466
/* Upper bound on the size of an XFRM_MSG_NEWSPDINFO reply: the u32
 * flags payload plus the SPD counters, hash info, and one
 * hash-threshold attribute each for IPv4 and IPv6 (hence the
 * xfrmu_spdhthresh term appearing twice).
 */
static inline unsigned int xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
}
1475
/* Fill @skb with an XFRM_MSG_NEWSPDINFO reply: the echoed flags word,
 * the SPD policy counters, the hash-table info and the per-family
 * hash thresholds (read consistently under the hthresh seqlock).
 */
static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct xfrmu_spdhthresh spt4, spt6;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;
	unsigned lseq;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	/* Retry until all four threshold values form one consistent
	 * snapshot (a concurrent writer holds the seqlock, see
	 * xfrm_set_spdinfo()).
	 */
	do {
		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1528
/* XFRM_MSG_NEWSPDINFO: update the SPD hash thresholds.
 *
 * Both per-family threshold attributes are fully validated before
 * anything is written; the new values are then applied under the
 * hthresh write seqlock and a policy hash rebuild is triggered.
 */
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrmu_spdhthresh *thresh4 = NULL;
	struct xfrmu_spdhthresh *thresh6 = NULL;

	/* selector prefixlen thresholds to hash policies */
	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh4)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh4 = nla_data(rta);
		if (thresh4->lbits > 32 || thresh4->rbits > 32) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)");
			return -EINVAL;
		}
	}
	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh6)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh6 = nla_data(rta);
		if (thresh6->lbits > 128 || thresh6->rbits > 128) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)");
			return -EINVAL;
		}
	}

	if (thresh4 || thresh6) {
		/* Write side of the seqlock: readers (build_spdinfo())
		 * observe a consistent tuple of threshold values.
		 */
		write_seqlock(&net->xfrm.policy_hthresh.lock);
		if (thresh4) {
			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
		}
		if (thresh6) {
			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
		}
		write_sequnlock(&net->xfrm.policy_hthresh.lock);

		xfrm_policy_hash_rebuild(net);
	}

	return 0;
}
1582
xfrm_get_spdinfo(struct sk_buff * skb,struct nlmsghdr * nlh,struct nlattr ** attrs,struct netlink_ext_ack * extack)1583 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1584 struct nlattr **attrs,
1585 struct netlink_ext_ack *extack)
1586 {
1587 struct net *net = sock_net(skb->sk);
1588 struct sk_buff *r_skb;
1589 u32 *flags = nlmsg_data(nlh);
1590 u32 sportid = NETLINK_CB(skb).portid;
1591 u32 seq = nlh->nlmsg_seq;
1592 int err;
1593
1594 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
1595 if (r_skb == NULL)
1596 return -ENOMEM;
1597
1598 err = build_spdinfo(r_skb, net, sportid, seq, *flags);
1599 BUG_ON(err < 0);
1600
1601 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1602 }
1603
/* Upper bound on the size of an XFRM_MSG_NEWSADINFO reply: the u32
 * flags payload, the SAD hash info struct and the u32 entry count.
 */
static inline unsigned int xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}
1610
/* Fill @skb with an XFRM_MSG_NEWSADINFO reply carrying the echoed
 * flags word, the SAD entry count and the hash-table info.
 */
static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmu_sadhinfo sh;
	struct xfrmk_sadinfo si;
	struct nlmsghdr *nlh;
	u32 *msg_flags;
	int err;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (!nlh) /* shouldn't really happen ... */
		return -EMSGSIZE;

	msg_flags = nlmsg_data(nlh);
	*msg_flags = flags;

	xfrm_sad_getinfo(net, &si);
	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1642
xfrm_get_sadinfo(struct sk_buff * skb,struct nlmsghdr * nlh,struct nlattr ** attrs,struct netlink_ext_ack * extack)1643 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1644 struct nlattr **attrs,
1645 struct netlink_ext_ack *extack)
1646 {
1647 struct net *net = sock_net(skb->sk);
1648 struct sk_buff *r_skb;
1649 u32 *flags = nlmsg_data(nlh);
1650 u32 sportid = NETLINK_CB(skb).portid;
1651 u32 seq = nlh->nlmsg_seq;
1652 int err;
1653
1654 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1655 if (r_skb == NULL)
1656 return -ENOMEM;
1657
1658 err = build_sadinfo(r_skb, net, sportid, seq, *flags);
1659 BUG_ON(err < 0);
1660
1661 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1662 }
1663
xfrm_get_sa(struct sk_buff * skb,struct nlmsghdr * nlh,struct nlattr ** attrs,struct netlink_ext_ack * extack)1664 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1665 struct nlattr **attrs, struct netlink_ext_ack *extack)
1666 {
1667 struct net *net = sock_net(skb->sk);
1668 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1669 struct xfrm_state *x;
1670 struct sk_buff *resp_skb;
1671 int err = -ESRCH;
1672
1673 x = xfrm_user_state_lookup(net, p, attrs, &err);
1674 if (x == NULL)
1675 goto out_noput;
1676
1677 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1678 if (IS_ERR(resp_skb)) {
1679 err = PTR_ERR(resp_skb);
1680 } else {
1681 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1682 }
1683 xfrm_state_put(x);
1684 out_noput:
1685 return err;
1686 }
1687
/* XFRM_MSG_ALLOCSPI: pick an SPI in [p->min, p->max] for a larval
 * (ACQUIRE) SA and unicast the resulting state back to the requester.
 * The target state is located by acquire sequence number when one is
 * given, otherwise by (mode, reqid, proto, daddr, saddr) via
 * xfrm_find_acq().
 */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct xfrm_translator *xtr;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		/* A sequence match with a different destination address
		 * is not the state we are looking for.
		 */
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  if_id, p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (!x) {
		NL_SET_ERR_MSG(extack, "Target ACQUIRE not found");
		goto out_noput;
	}

	err = xfrm_alloc_spi(x, p->min, p->max, extack);
	if (err)
		goto out;

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		/* NOTE(review): the compat translation is attached to the
		 * request skb here, not to resp_skb — looks suspicious,
		 * confirm against mainline before changing.
		 */
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(resp_skb);
			goto out;
		}
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}
1769
/* Accept only the three valid policy directions: IN, OUT and FWD. */
static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
{
	if (dir == XFRM_POLICY_IN || dir == XFRM_POLICY_OUT ||
	    dir == XFRM_POLICY_FWD)
		return 0;

	NL_SET_ERR_MSG(extack, "Invalid policy direction");
	return -EINVAL;
}
1785
/* Accept MAIN always; SUB only when sub-policy support is built in. */
static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
	if (type == XFRM_POLICY_TYPE_MAIN)
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (type == XFRM_POLICY_TYPE_SUB)
		return 0;
#endif

	NL_SET_ERR_MSG(extack, "Invalid policy type");
	return -EINVAL;
}
1802
/* Validate a userspace policy before it becomes a kernel policy:
 * share mode, action, selector prefix lengths (bounded by the address
 * width of the selector family), direction, and the constraint that an
 * explicit index must map back to the same direction.
 */
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
				 struct netlink_ext_ack *extack)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy share");
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy action");
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			return -EINVAL;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			return -EINVAL;
		}

		break;
#else
		/* With IPv6 compiled out, AF_INET6 policies are
		 * rejected outright.
		 */
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		return -EAFNOSUPPORT;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid selector family");
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir, extack);
	if (ret)
		return ret;
	/* xfrm_policy_id2dir() derives the direction from a non-zero
	 * index; it must agree with p->dir.
	 */
	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
		NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
		return -EINVAL;
	}

	return 0;
}
1867
copy_from_user_sec_ctx(struct xfrm_policy * pol,struct nlattr ** attrs)1868 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1869 {
1870 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1871 struct xfrm_user_sec_ctx *uctx;
1872
1873 if (!rt)
1874 return 0;
1875
1876 uctx = nla_data(rt);
1877 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1878 }
1879
copy_templates(struct xfrm_policy * xp,struct xfrm_user_tmpl * ut,int nr)1880 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1881 int nr)
1882 {
1883 int i;
1884
1885 xp->xfrm_nr = nr;
1886 for (i = 0; i < nr; i++, ut++) {
1887 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1888
1889 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1890 memcpy(&t->saddr, &ut->saddr,
1891 sizeof(xfrm_address_t));
1892 t->reqid = ut->reqid;
1893 t->mode = ut->mode;
1894 t->share = ut->share;
1895 t->optional = ut->optional;
1896 t->aalgos = ut->aalgos;
1897 t->ealgos = ut->ealgos;
1898 t->calgos = ut->calgos;
1899 /* If all masks are ~0, then we allow all algorithms. */
1900 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1901 t->encap_family = ut->family;
1902 }
1903 }
1904
/* Sanity-check an array of @nr userspace templates for a policy of
 * @family in direction @dir.  A zero template family is backfilled in
 * place from the policy family; a family change between consecutive
 * templates is only permitted for TUNNEL/BEET mode templates.
 */
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
			 int dir, struct netlink_ext_ack *extack)
{
	u16 prev_family;
	int i;

	if (nr > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
		return -EINVAL;
	}

	prev_family = family;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero. The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself. Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (ut[i].optional && dir == XFRM_POLICY_OUT) {
				NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
				return -EINVAL;
			}
			break;
		default:
			/* Transport-like modes cannot change address
			 * family relative to the previous template.
			 */
			if (ut[i].family != prev_family) {
				NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
				return -EINVAL;
			}
			break;
		}
		if (ut[i].mode >= XFRM_MODE_MAX) {
			NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
			return -EINVAL;
		}

		prev_family = ut[i].family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			NL_SET_ERR_MSG(extack, "Invalid family in template");
			return -EINVAL;
		}

		if (!xfrm_id_proto_valid(ut[i].id.proto)) {
			NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
			return -EINVAL;
		}
	}

	return 0;
}
1971
copy_from_user_tmpl(struct xfrm_policy * pol,struct nlattr ** attrs,int dir,struct netlink_ext_ack * extack)1972 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
1973 int dir, struct netlink_ext_ack *extack)
1974 {
1975 struct nlattr *rt = attrs[XFRMA_TMPL];
1976
1977 if (!rt) {
1978 pol->xfrm_nr = 0;
1979 } else {
1980 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1981 int nr = nla_len(rt) / sizeof(*utmpl);
1982 int err;
1983
1984 err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
1985 if (err)
1986 return err;
1987
1988 copy_templates(pol, utmpl, nr);
1989 }
1990 return 0;
1991 }
1992
/* Extract the policy type from the optional XFRMA_POLICY_TYPE
 * attribute, defaulting to MAIN, and verify it before storing in *tp.
 */
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
				      struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		struct xfrm_userpolicy_type *upt = nla_data(rt);

		type = upt->type;
	}

	err = verify_policy_type(type, extack);
	if (err)
		return err;

	*tp = type;
	return 0;
}
2013
copy_from_user_policy(struct xfrm_policy * xp,struct xfrm_userpolicy_info * p)2014 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
2015 {
2016 xp->priority = p->priority;
2017 xp->index = p->index;
2018 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
2019 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
2020 xp->action = p->action;
2021 xp->flags = p->flags;
2022 xp->family = p->sel.family;
2023 /* XXX xp->share = p->share; */
2024 }
2025
copy_to_user_policy(struct xfrm_policy * xp,struct xfrm_userpolicy_info * p,int dir)2026 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
2027 {
2028 memset(p, 0, sizeof(*p));
2029 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
2030 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
2031 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
2032 p->priority = xp->priority;
2033 p->index = xp->index;
2034 p->sel.family = xp->family;
2035 p->dir = dir;
2036 p->action = xp->action;
2037 p->flags = xp->flags;
2038 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
2039 }
2040
/* Build a kernel policy from a userspace policy message and its
 * attributes (type, templates, security context, mark, if_id and
 * optional hardware offload).  Returns the new policy, or NULL with
 * *errp set on failure.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net,
						 struct xfrm_userpolicy_info *p,
						 struct nlattr **attrs,
						 int *errp,
						 struct netlink_ext_ack *extack)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs, extack);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	if (attrs[XFRMA_IF_ID])
		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_policy_add(net, xp,
					  nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					  p->dir, extack);
		if (err)
			goto error;
	}

	return xp;
 error:
	*errp = err;
	/* NOTE(review): walk.dead is set before xfrm_policy_destroy() —
	 * presumably required by the destroy path; keep this order.
	 */
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}
2087
/* XFRM_MSG_NEWPOLICY (exclusive insert) and XFRM_MSG_UPDPOLICY
 * (add-or-replace) handler: build a policy from the message, insert
 * it into the SPD, audit the attempt and notify policy listeners.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;

	err = verify_newpolicy_info(p, extack);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err, extack);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	/* Both the success and the failure of the insert are audited. */
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);

	if (err) {
		/* Insert failed: undo the offload setup, release the
		 * security context and free the never-published policy.
		 */
		xfrm_dev_policy_delete(xp);
		xfrm_dev_policy_free(xp);
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}
2135
copy_to_user_tmpl(struct xfrm_policy * xp,struct sk_buff * skb)2136 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
2137 {
2138 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
2139 int i;
2140
2141 if (xp->xfrm_nr == 0)
2142 return 0;
2143
2144 if (xp->xfrm_nr > XFRM_MAX_DEPTH)
2145 return -ENOBUFS;
2146
2147 for (i = 0; i < xp->xfrm_nr; i++) {
2148 struct xfrm_user_tmpl *up = &vec[i];
2149 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
2150
2151 memset(up, 0, sizeof(*up));
2152 memcpy(&up->id, &kp->id, sizeof(up->id));
2153 up->family = kp->encap_family;
2154 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
2155 up->reqid = kp->reqid;
2156 up->mode = kp->mode;
2157 up->share = kp->share;
2158 up->optional = kp->optional;
2159 up->aalgos = kp->aalgos;
2160 up->ealgos = kp->ealgos;
2161 up->calgos = kp->calgos;
2162 }
2163
2164 return nla_put(skb, XFRMA_TMPL,
2165 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
2166 }
2167
copy_to_user_state_sec_ctx(struct xfrm_state * x,struct sk_buff * skb)2168 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
2169 {
2170 if (x->security) {
2171 return copy_sec_ctx(x->security, skb);
2172 }
2173 return 0;
2174 }
2175
copy_to_user_sec_ctx(struct xfrm_policy * xp,struct sk_buff * skb)2176 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
2177 {
2178 if (xp->security)
2179 return copy_sec_ctx(xp->security, skb);
2180 return 0;
2181 }
/* Attribute space needed for XFRMA_POLICY_TYPE; zero when sub-policy
 * support is compiled out, since the attribute is then never emitted
 * (see copy_to_user_policy_type()).
 */
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}
2190
2191 #ifdef CONFIG_XFRM_SUB_POLICY
/* Emit the policy type as an XFRMA_POLICY_TYPE attribute. */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt;

	/* Sadly there are two holes in struct xfrm_userpolicy_type */
	/* memset (rather than an initializer) so the padding holes are
	 * zeroed and never leak kernel stack to userspace.
	 */
	memset(&upt, 0, sizeof(upt));
	upt.type = type;

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}
2202
2203 #else
/* Sub-policy support compiled out: the XFRMA_POLICY_TYPE attribute is
 * never emitted (matching userpolicy_type_attrsize() returning 0).
 */
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
2208 #endif
2209
/* Emit one policy as an XFRM_MSG_NEWPOLICY message into the dump skb:
 * header, templates, security context, policy type, mark, if_id and
 * (when offloaded) device offload info, plus an optional compat
 * translation.  On any failure the partial message is cancelled.
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	/* Each step runs only while err is still zero; the first failure
	 * is the one reported.
	 */
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
2257
xfrm_dump_policy_done(struct netlink_callback * cb)2258 static int xfrm_dump_policy_done(struct netlink_callback *cb)
2259 {
2260 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
2261 struct net *net = sock_net(cb->skb->sk);
2262
2263 xfrm_policy_walk_done(walk, net);
2264 return 0;
2265 }
2266
/* Netlink dump setup: initialize a walker over all policy types in cb->args. */
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;

	/* The walker state must fit in the netlink callback scratch space. */
	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));

	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	return 0;
}
2276
/*
 * Netlink dump step: resume the walk stored in cb->args and append as many
 * policies as fit into @skb.  Returns skb->len so netlink core keeps calling
 * until the walk is exhausted.
 */
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct xfrm_dump_info info;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}
2292
/*
 * Build a freshly allocated skb holding a single XFRM_MSG_NEWPOLICY message
 * describing @xp, suitable for a unicast reply to @in_skb's sender.
 * Returns the skb or an ERR_PTR() on failure.
 */
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* No NLM_F_MULTI: this is a standalone reply, not a dump fragment. */
	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2318
/*
 * Broadcast the netns default policies (in/fwd/out) as an
 * XFRM_MSG_GETDEFAULT message on the XFRMNLGRP_POLICY multicast group.
 */
static int xfrm_notify_userpolicy(struct net *net)
{
	struct xfrm_userpolicy_default *up;
	int len = NLMSG_ALIGN(sizeof(*up));
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	up = nlmsg_data(nlh);
	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];

	nlmsg_end(skb, nlh);

	rcu_read_lock();
	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
	rcu_read_unlock();

	return err;
}
2350
xfrm_userpolicy_is_valid(__u8 policy)2351 static bool xfrm_userpolicy_is_valid(__u8 policy)
2352 {
2353 return policy == XFRM_USERPOLICY_BLOCK ||
2354 policy == XFRM_USERPOLICY_ACCEPT;
2355 }
2356
/*
 * XFRM_MSG_SETDEFAULT handler: update the netns default policy for each
 * direction carrying a valid value (BLOCK/ACCEPT); other values leave
 * that direction untouched.  Bumps the route genid so cached routes are
 * revalidated, then broadcasts the new defaults.
 */
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);

	if (xfrm_userpolicy_is_valid(up->in))
		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;

	if (xfrm_userpolicy_is_valid(up->fwd))
		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;

	if (xfrm_userpolicy_is_valid(up->out))
		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;

	rt_genid_bump_all(net);

	xfrm_notify_userpolicy(net);
	return 0;
}
2377
/*
 * XFRM_MSG_GETDEFAULT handler: unicast the current netns default
 * policies (in/fwd/out) back to the requesting socket.
 */
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct sk_buff *r_skb;
	struct nlmsghdr *r_nlh;
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *r_up;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
	u32 portid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(len, GFP_ATOMIC);
	if (!r_skb)
		return -ENOMEM;

	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
	if (!r_nlh) {
		kfree_skb(r_skb);
		return -EMSGSIZE;
	}

	r_up = nlmsg_data(r_nlh);
	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
	nlmsg_end(r_skb, r_nlh);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
}
2407
/*
 * Shared doit handler for XFRM_MSG_GETPOLICY and XFRM_MSG_DELPOLICY.
 * The policy is located by index or by (selector, security context);
 * for GETPOLICY the result is unicast back, for DELPOLICY the lookup
 * itself unlinks the policy (delete=true) and a km notification is sent.
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	/* Same handler serves both message types; DELPOLICY also removes. */
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
				      p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		/* err here reflects the delete performed inside the lookup. */
		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}
2491
/*
 * XFRM_MSG_FLUSHSA handler: delete all SAs of the given protocol
 * (IPSEC_PROTO_ANY for all) and notify key managers.  An already-empty
 * table (-ESRCH) is treated as success.
 */
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	int err;

	err = xfrm_state_flush(net, p->proto, true, false);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}
2516
/*
 * Upper bound on the payload size of an XFRM_MSG_NEWAE message for @x:
 * the fixed header plus replay state, lifetime, mark, both thresholds
 * and the SA direction attribute.
 */
static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
{
	/* ESN replay state is variable-length; legacy state is fixed. */
	unsigned int replay_size = x->replay_esn ?
			      xfrm_replay_state_esn_len(x->replay_esn) :
			      sizeof(struct xfrm_replay_state);

	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(replay_size)
	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4) /* XFRM_AE_ETHR */
	       + nla_total_size(sizeof(x->dir)); /* XFRMA_SA_DIR */
}
2531
/*
 * Fill @skb with an XFRM_MSG_NEWAE message for state @x.  The caller must
 * have sized @skb with xfrm_aevent_msgsize(); c->data.aevent selects which
 * optional threshold attributes are included.
 */
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	/* Clear padding holes before copying the struct to userspace. */
	memset(&id->sa_id, 0, sizeof(id->sa_id));
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
			    XFRMA_PAD);
	if (err)
		goto out_cancel;

	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		/* Aging timer is kept in jiffies; report it in 100ms units. */
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		goto out_cancel;

	if (x->dir) {
		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
2599
/*
 * XFRM_MSG_GETAE handler: look up the SA named in the request and unicast
 * an XFRM_MSG_NEWAE reply carrying its replay/lifetime state.
 */
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	/* r_skb was sized by xfrm_aevent_msgsize(), so this cannot fail. */
	err = build_aevent(r_skb, x, &c);
	BUG_ON(err < 0);

	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2643
/*
 * XFRM_MSG_NEWAE handler: update replay state, lifetime and/or event
 * thresholds of an existing VALID SA, then broadcast an XFRM_AE_CU
 * notification.  Requires NLM_F_REPLACE and at least one AE attribute.
 */
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (!lt && !rp && !re && !et && !rt) {
		NL_SET_ERR_MSG(extack, "Missing required attribute for AE");
		return err;
	}

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required");
		return err;
	}

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	/* ESN replay attribute length depends on the SA's window size. */
	err = xfrm_replay_verify_len(x->replay_esn, re, extack);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}
2700
/*
 * XFRM_MSG_FLUSHPOLICY handler: delete all policies of the requested
 * type and notify key managers.  An already-empty table (-ESRCH) is
 * treated as success.
 */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = xfrm_policy_flush(net, type, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}
2729
/*
 * XFRM_MSG_POLEXPIRE handler: user-triggered policy expiry.  Locates the
 * policy by index or (selector, security context) and fires the expire
 * notification; a hard expire also deletes the policy.
 */
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 if_id = 0;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
				      0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	/* Policy may have been killed concurrently; nothing left to expire. */
	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, true);
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}
2796
/*
 * XFRM_MSG_EXPIRE handler: user-triggered SA expiry.  Fires the state
 * expire notification on a VALID SA; a hard expire also deletes the SA.
 */
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, true);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2834
/*
 * XFRM_MSG_ACQUIRE handler: build a transient policy and state from the
 * user's acquire message and issue km_query() for each template, so key
 * managers can start negotiating SAs.  Both objects are scratch only and
 * are freed before returning; nothing is inserted into the databases.
 */
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy, extack);
	if (err)
		goto free_state;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		goto free_state;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);

	}

	/* Both were only query scaffolding; drop them without unlinking. */
	xfrm_state_free(x);
	kfree(xp);

	return 0;

free_state:
	xfrm_state_free(x);
nomem:
	return err;
}
2897
2898 #ifdef CONFIG_XFRM_MIGRATE
/*
 * Parse the XFRMA_MIGRATE attribute (and, when @k is non-NULL, the
 * XFRMA_KMADDRESS attribute) into kernel structures.  The caller has
 * already verified XFRMA_MIGRATE is present; *num receives the entry
 * count, bounded by XFRM_MAX_DEPTH.
 */
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}
2943
/*
 * XFRM_MSG_MIGRATE handler: parse the migration list (plus optional
 * kmaddress, encap template and if_id) and hand it to xfrm_migrate()
 * to re-address matching policies and states.
 */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);
	struct xfrm_encap_tmpl *encap = NULL;
	u32 if_id = 0;

	if (!attrs[XFRMA_MIGRATE]) {
		NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute");
		return -EINVAL;
	}

	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = copy_from_user_migrate(m, kmp, attrs, &n, extack);
	if (err)
		return err;

	if (!n)
		return 0;

	if (attrs[XFRMA_ENCAP]) {
		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				sizeof(*encap), GFP_KERNEL);
		if (!encap)
			return -ENOMEM;
	}

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap,
			   if_id, extack);

	kfree(encap);

	return err;
}
2992 #else
/* Migration support compiled out (CONFIG_XFRM_MIGRATE=n). */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	return -ENOPROTOOPT;
}
2998 #endif
2999
3000 #ifdef CONFIG_XFRM_MIGRATE
copy_to_user_migrate(const struct xfrm_migrate * m,struct sk_buff * skb)3001 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
3002 {
3003 struct xfrm_user_migrate um;
3004
3005 memset(&um, 0, sizeof(um));
3006 um.proto = m->proto;
3007 um.mode = m->mode;
3008 um.reqid = m->reqid;
3009 um.old_family = m->old_family;
3010 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
3011 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
3012 um.new_family = m->new_family;
3013 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
3014 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
3015
3016 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
3017 }
3018
copy_to_user_kmaddress(const struct xfrm_kmaddress * k,struct sk_buff * skb)3019 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
3020 {
3021 struct xfrm_user_kmaddress uk;
3022
3023 memset(&uk, 0, sizeof(uk));
3024 uk.family = k->family;
3025 uk.reserved = k->reserved;
3026 memcpy(&uk.local, &k->local, sizeof(uk.local));
3027 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
3028
3029 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
3030 }
3031
/*
 * Payload size of an XFRM_MSG_MIGRATE message carrying @num_migrate
 * entries, plus the optional kmaddress and encap attributes.
 */
static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
						int with_encp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	      + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	      + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
	      + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	      + userpolicy_type_attrsize();
}
3041
/*
 * Fill @skb with an XFRM_MSG_MIGRATE message: the policy id (selector +
 * direction), optional kmaddress and encap attributes, the policy type,
 * and one XFRMA_MIGRATE attribute per migration entry.
 */
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel,
			 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	if (encap) {
		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
3088
/*
 * Broadcast an XFRM_MSG_MIGRATE notification on the XFRMNLGRP_MIGRATE
 * multicast group (km->migrate callback for the netlink key manager).
 */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	struct net *net = &init_net;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
			GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	/* skb was sized by xfrm_migrate_msgsize(), so this cannot fail. */
	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
3109 #else
/* Migration support compiled out (CONFIG_XFRM_MIGRATE=n). */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	return -ENOPROTOOPT;
}
3117 #endif
3118
#define XMSGSIZE(type) sizeof(struct type)

/*
 * Fixed-header size expected for each xfrm netlink message type,
 * indexed by (type - XFRM_MSG_BASE).
 */
const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
};
EXPORT_SYMBOL_GPL(xfrm_msg_min);

#undef XMSGSIZE
3149
/*
 * Netlink attribute validation policy for XFRMA_* attributes.
 * strict_start_type = XFRMA_SA_DIR: attributes from XFRMA_SA_DIR onward
 * are validated strictly (unknown/oversized attributes rejected).
 */
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_UNSPEC]		= { .strict_start_type = XFRMA_SA_DIR },
	[XFRMA_SA]		= { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY]		= { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED]	= { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC]	= { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL]	= { .len = sizeof(struct xfrm_replay_state_esn) },
	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
	[XFRMA_PROTO]		= { .type = NLA_U8 },
	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
	[XFRMA_IF_ID]		= { .type = NLA_U32 },
	[XFRMA_MTIMER_THRESH]   = { .type = NLA_U32 },
	[XFRMA_SA_DIR]          = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
	[XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
3187
/* Attribute policy for XFRM_MSG_NEWSPDINFO (SPD hash thresholds). */
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
	[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
	[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};
3192
/*
 * Dispatch table for xfrm netlink messages, indexed by
 * (type - XFRM_MSG_BASE).  .doit handles requests; .start/.dump/.done
 * implement dumps; .nla_pol/.nla_max supply an alternative attribute
 * policy for types that do not use xfrma_policy.
 */
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **,
		    struct netlink_ext_ack *);
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	const struct nla_policy *nla_pol;
	int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa,
						   .done = xfrm_dump_sa_done  },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .start = xfrm_dump_policy_start,
						   .dump = xfrm_dump_policy,
						   .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire   },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
						   .nla_pol = xfrma_spd_policy,
						   .nla_max = XFRMA_SPD_MAX },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
};
3232
xfrm_reject_unused_attr(int type,struct nlattr ** attrs,struct netlink_ext_ack * extack)3233 static int xfrm_reject_unused_attr(int type, struct nlattr **attrs,
3234 struct netlink_ext_ack *extack)
3235 {
3236 if (attrs[XFRMA_SA_DIR]) {
3237 switch (type) {
3238 case XFRM_MSG_NEWSA:
3239 case XFRM_MSG_UPDSA:
3240 case XFRM_MSG_ALLOCSPI:
3241 break;
3242 default:
3243 NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR");
3244 return -EINVAL;
3245 }
3246 }
3247
3248 return 0;
3249 }
3250
/* Top-level handler for one XFRM netlink request.
 *
 * Validates the message type, enforces CAP_NET_ADMIN, translates 32-bit
 * compat messages, starts a dump for NLM_F_DUMP GETSA/GETPOLICY, and
 * otherwise parses attributes and dispatches to the per-type ->doit().
 */
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	const struct xfrm_link *link;
	struct nlmsghdr *nlh64 = NULL;	/* compat-translated copy, if any */
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr)
			return -EOPNOTSUPP;

		/* May allocate a 64-bit copy of the message; released via
		 * kvfree(nlh64) at the err label below.
		 */
		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
					    link->nla_pol, extack);
		xfrm_put_translator(xtr);
		if (IS_ERR(nlh64))
			return PTR_ERR(nlh64);
		if (nlh64)
			nlh = nlh64;
	}

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct netlink_dump_control c = {
			.start = link->start,
			.dump = link->dump,
			.done = link->done,
		};

		if (link->dump == NULL) {
			err = -EINVAL;
			goto err;
		}

		err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		goto err;
	}

	/* Use the per-type attribute policy when the dispatch entry
	 * provides one, the generic xfrma_policy otherwise.
	 */
	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
				     link->nla_max ? : XFRMA_MAX,
				     link->nla_pol ? : xfrma_policy, extack);
	if (err < 0)
		goto err;

	if (!link->nla_pol || link->nla_pol == xfrma_policy) {
		err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack);
		if (err < 0)
			goto err;
	}

	if (link->doit == NULL) {
		err = -EINVAL;
		goto err;
	}

	err = link->doit(skb, nlh, attrs, extack);

	/* We need to free skb allocated in xfrm_alloc_compat() before
	 * returning from this function, because consume_skb() won't take
	 * care of frag_list since netlink destructor sets
	 * sbk->head to NULL. (see netlink_skb_destructor())
	 */
	if (skb_has_frag_list(skb)) {
		kfree_skb(skb_shinfo(skb)->frag_list);
		skb_shinfo(skb)->frag_list = NULL;
	}

err:
	kvfree(nlh64);
	return err;
}
3337
xfrm_netlink_rcv(struct sk_buff * skb)3338 static void xfrm_netlink_rcv(struct sk_buff *skb)
3339 {
3340 struct net *net = sock_net(skb->sk);
3341
3342 mutex_lock(&net->xfrm.xfrm_cfg_mutex);
3343 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
3344 mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
3345 }
3346
xfrm_expire_msgsize(void)3347 static inline unsigned int xfrm_expire_msgsize(void)
3348 {
3349 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) +
3350 nla_total_size(sizeof(struct xfrm_mark)) +
3351 nla_total_size(sizeof_field(struct xfrm_state, dir));
3352 }
3353
build_expire(struct sk_buff * skb,struct xfrm_state * x,const struct km_event * c)3354 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
3355 {
3356 struct xfrm_user_expire *ue;
3357 struct nlmsghdr *nlh;
3358 int err;
3359
3360 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
3361 if (nlh == NULL)
3362 return -EMSGSIZE;
3363
3364 ue = nlmsg_data(nlh);
3365 copy_to_user_state(x, &ue->state);
3366 ue->hard = (c->data.hard != 0) ? 1 : 0;
3367 /* clear the padding bytes */
3368 memset_after(ue, 0, hard);
3369
3370 err = xfrm_mark_put(skb, &x->mark);
3371 if (err)
3372 return err;
3373
3374 err = xfrm_if_id_put(skb, x->if_id);
3375 if (err)
3376 return err;
3377
3378 if (x->dir) {
3379 err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
3380 if (err)
3381 return err;
3382 }
3383
3384 nlmsg_end(skb, nlh);
3385 return 0;
3386 }
3387
xfrm_exp_state_notify(struct xfrm_state * x,const struct km_event * c)3388 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
3389 {
3390 struct net *net = xs_net(x);
3391 struct sk_buff *skb;
3392
3393 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
3394 if (skb == NULL)
3395 return -ENOMEM;
3396
3397 if (build_expire(skb, x, c) < 0) {
3398 kfree_skb(skb);
3399 return -EMSGSIZE;
3400 }
3401
3402 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3403 }
3404
xfrm_aevent_state_notify(struct xfrm_state * x,const struct km_event * c)3405 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
3406 {
3407 struct net *net = xs_net(x);
3408 struct sk_buff *skb;
3409 int err;
3410
3411 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
3412 if (skb == NULL)
3413 return -ENOMEM;
3414
3415 err = build_aevent(skb, x, c);
3416 BUG_ON(err < 0);
3417
3418 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
3419 }
3420
xfrm_notify_sa_flush(const struct km_event * c)3421 static int xfrm_notify_sa_flush(const struct km_event *c)
3422 {
3423 struct net *net = c->net;
3424 struct xfrm_usersa_flush *p;
3425 struct nlmsghdr *nlh;
3426 struct sk_buff *skb;
3427 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
3428
3429 skb = nlmsg_new(len, GFP_ATOMIC);
3430 if (skb == NULL)
3431 return -ENOMEM;
3432
3433 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
3434 if (nlh == NULL) {
3435 kfree_skb(skb);
3436 return -EMSGSIZE;
3437 }
3438
3439 p = nlmsg_data(nlh);
3440 p->proto = c->data.proto;
3441
3442 nlmsg_end(skb, nlh);
3443
3444 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
3445 }
3446
/* Upper bound on the attribute payload needed to dump state @x; used by
 * xfrm_notify_sa() to size the notification skb.  Each term mirrors an
 * attribute emitted by copy_to_user_state_extra() — keep them in sync.
 */
static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
{
	unsigned int l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		/* legacy XFRMA_ALG_AUTH plus XFRMA_ALG_AUTH_TRUNC */
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	else
		l += nla_total_size(sizeof(struct xfrm_replay_state));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));
	if (x->props.extra_flags)
		l += nla_total_size(sizeof(x->props.extra_flags));
	if (x->xso.dev)
		l += nla_total_size(sizeof(struct xfrm_user_offload));
	if (x->props.smark.v | x->props.smark.m) {
		l += nla_total_size(sizeof(x->props.smark.v));
		l += nla_total_size(sizeof(x->props.smark.m));
	}
	if (x->if_id)
		l += nla_total_size(sizeof(x->if_id));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size_64bit(sizeof(u64));

	if (x->mapping_maxage)
		l += nla_total_size(sizeof(x->mapping_maxage));

	if (x->dir)
		l += nla_total_size(sizeof(x->dir));

	if (x->nat_keepalive_interval)
		l += nla_total_size(sizeof(x->nat_keepalive_interval));

	return l;
}
3499
/* Multicast an SA event (NEWSA/UPDSA/DELSA) to XFRMNLGRP_SA listeners.
 *
 * For NEWSA/UPDSA the message header is the full xfrm_usersa_info.  For
 * DELSA the header is a compact xfrm_usersa_id and the usersa_info is
 * nested inside an XFRMA_SA attribute instead, so the sizing below
 * accounts for both layouts.
 */
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int len = xfrm_sa_len(x);
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		/* info moves into an attribute; header shrinks to the id */
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		/* re-point p at the nested attribute's payload */
		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}
	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3558
xfrm_send_state_notify(struct xfrm_state * x,const struct km_event * c)3559 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
3560 {
3561
3562 switch (c->event) {
3563 case XFRM_MSG_EXPIRE:
3564 return xfrm_exp_state_notify(x, c);
3565 case XFRM_MSG_NEWAE:
3566 return xfrm_aevent_state_notify(x, c);
3567 case XFRM_MSG_DELSA:
3568 case XFRM_MSG_UPDSA:
3569 case XFRM_MSG_NEWSA:
3570 return xfrm_notify_sa(x, c);
3571 case XFRM_MSG_FLUSHSA:
3572 return xfrm_notify_sa_flush(c);
3573 default:
3574 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
3575 c->event);
3576 break;
3577 }
3578
3579 return 0;
3580
3581 }
3582
xfrm_acquire_msgsize(struct xfrm_state * x,struct xfrm_policy * xp)3583 static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
3584 struct xfrm_policy *xp)
3585 {
3586 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
3587 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3588 + nla_total_size(sizeof(struct xfrm_mark))
3589 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
3590 + userpolicy_type_attrsize();
3591 }
3592
build_acquire(struct sk_buff * skb,struct xfrm_state * x,struct xfrm_tmpl * xt,struct xfrm_policy * xp)3593 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
3594 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
3595 {
3596 __u32 seq = xfrm_get_acqseq();
3597 struct xfrm_user_acquire *ua;
3598 struct nlmsghdr *nlh;
3599 int err;
3600
3601 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
3602 if (nlh == NULL)
3603 return -EMSGSIZE;
3604
3605 ua = nlmsg_data(nlh);
3606 memcpy(&ua->id, &x->id, sizeof(ua->id));
3607 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
3608 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
3609 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
3610 ua->aalgos = xt->aalgos;
3611 ua->ealgos = xt->ealgos;
3612 ua->calgos = xt->calgos;
3613 ua->seq = x->km.seq = seq;
3614
3615 err = copy_to_user_tmpl(xp, skb);
3616 if (!err)
3617 err = copy_to_user_state_sec_ctx(x, skb);
3618 if (!err)
3619 err = copy_to_user_policy_type(xp->type, skb);
3620 if (!err)
3621 err = xfrm_mark_put(skb, &xp->mark);
3622 if (!err)
3623 err = xfrm_if_id_put(skb, xp->if_id);
3624 if (!err && xp->xdo.dev)
3625 err = copy_user_offload(&xp->xdo, skb);
3626 if (err) {
3627 nlmsg_cancel(skb, nlh);
3628 return err;
3629 }
3630
3631 nlmsg_end(skb, nlh);
3632 return 0;
3633 }
3634
xfrm_send_acquire(struct xfrm_state * x,struct xfrm_tmpl * xt,struct xfrm_policy * xp)3635 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
3636 struct xfrm_policy *xp)
3637 {
3638 struct net *net = xs_net(x);
3639 struct sk_buff *skb;
3640 int err;
3641
3642 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
3643 if (skb == NULL)
3644 return -ENOMEM;
3645
3646 err = build_acquire(skb, x, xt, xp);
3647 BUG_ON(err < 0);
3648
3649 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
3650 }
3651
/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 *
 * Backend for the IP_XFRM_POLICY / IPV6_XFRM_POLICY setsockopt()s:
 * validate the blob and build a socket policy from it.  On success the
 * new policy is returned and *dir holds p->dir; on failure NULL is
 * returned and *dir carries the negative errno.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	/* the socket family decides which sockopt may carry a policy */
	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	/* default error for the validation failures below */
	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p, NULL))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
		return NULL;

	/* socket policies are only IN or OUT, never FWD */
	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}
3711
xfrm_polexpire_msgsize(struct xfrm_policy * xp)3712 static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
3713 {
3714 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
3715 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3716 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
3717 + nla_total_size(sizeof(struct xfrm_mark))
3718 + userpolicy_type_attrsize();
3719 }
3720
build_polexpire(struct sk_buff * skb,struct xfrm_policy * xp,int dir,const struct km_event * c)3721 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
3722 int dir, const struct km_event *c)
3723 {
3724 struct xfrm_user_polexpire *upe;
3725 int hard = c->data.hard;
3726 struct nlmsghdr *nlh;
3727 int err;
3728
3729 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
3730 if (nlh == NULL)
3731 return -EMSGSIZE;
3732
3733 upe = nlmsg_data(nlh);
3734 copy_to_user_policy(xp, &upe->pol, dir);
3735 err = copy_to_user_tmpl(xp, skb);
3736 if (!err)
3737 err = copy_to_user_sec_ctx(xp, skb);
3738 if (!err)
3739 err = copy_to_user_policy_type(xp->type, skb);
3740 if (!err)
3741 err = xfrm_mark_put(skb, &xp->mark);
3742 if (!err)
3743 err = xfrm_if_id_put(skb, xp->if_id);
3744 if (!err && xp->xdo.dev)
3745 err = copy_user_offload(&xp->xdo, skb);
3746 if (err) {
3747 nlmsg_cancel(skb, nlh);
3748 return err;
3749 }
3750 upe->hard = !!hard;
3751
3752 nlmsg_end(skb, nlh);
3753 return 0;
3754 }
3755
xfrm_exp_policy_notify(struct xfrm_policy * xp,int dir,const struct km_event * c)3756 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3757 {
3758 struct net *net = xp_net(xp);
3759 struct sk_buff *skb;
3760 int err;
3761
3762 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
3763 if (skb == NULL)
3764 return -ENOMEM;
3765
3766 err = build_polexpire(skb, xp, dir, c);
3767 BUG_ON(err < 0);
3768
3769 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3770 }
3771
/* Multicast a policy event (NEWPOLICY/UPDPOLICY/DELPOLICY) to
 * XFRMNLGRP_POLICY listeners.
 *
 * For NEW/UPD the message header is the full xfrm_userpolicy_info.  For
 * DELPOLICY the header is a compact xfrm_userpolicy_id and the
 * userpolicy_info is nested inside an XFRMA_POLICY attribute instead.
 */
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		/* info moves into an attribute; header shrinks to the id */
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		/* identify the policy by index or by selector */
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		/* re-point p at the nested attribute's payload */
		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3842
xfrm_notify_policy_flush(const struct km_event * c)3843 static int xfrm_notify_policy_flush(const struct km_event *c)
3844 {
3845 struct net *net = c->net;
3846 struct nlmsghdr *nlh;
3847 struct sk_buff *skb;
3848 int err;
3849
3850 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
3851 if (skb == NULL)
3852 return -ENOMEM;
3853
3854 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
3855 err = -EMSGSIZE;
3856 if (nlh == NULL)
3857 goto out_free_skb;
3858 err = copy_to_user_policy_type(c->data.type, skb);
3859 if (err)
3860 goto out_free_skb;
3861
3862 nlmsg_end(skb, nlh);
3863
3864 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
3865
3866 out_free_skb:
3867 kfree_skb(skb);
3868 return err;
3869 }
3870
xfrm_send_policy_notify(struct xfrm_policy * xp,int dir,const struct km_event * c)3871 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3872 {
3873
3874 switch (c->event) {
3875 case XFRM_MSG_NEWPOLICY:
3876 case XFRM_MSG_UPDPOLICY:
3877 case XFRM_MSG_DELPOLICY:
3878 return xfrm_notify_policy(xp, dir, c);
3879 case XFRM_MSG_FLUSHPOLICY:
3880 return xfrm_notify_policy_flush(c);
3881 case XFRM_MSG_POLEXPIRE:
3882 return xfrm_exp_policy_notify(xp, dir, c);
3883 default:
3884 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
3885 c->event);
3886 }
3887
3888 return 0;
3889
3890 }
3891
xfrm_report_msgsize(void)3892 static inline unsigned int xfrm_report_msgsize(void)
3893 {
3894 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
3895 }
3896
/* Fill @skb with an XFRM_MSG_REPORT message; @addr, when non-NULL, is
 * attached as an XFRMA_COADDR attribute.
 */
static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;
	int err = 0;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (!nlh)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr)
		err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
3921
/* Multicast a report message to XFRMNLGRP_REPORT listeners. */
static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	BUG_ON(build_report(skb, proto, sel, addr) < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
}
3937
xfrm_mapping_msgsize(void)3938 static inline unsigned int xfrm_mapping_msgsize(void)
3939 {
3940 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
3941 }
3942
/* Fill @skb with an XFRM_MSG_MAPPING message describing the NAT-T source
 * address/port change detected on state @x.
 */
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *map;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*map), 0);
	if (!nlh)
		return -EMSGSIZE;

	map = nlmsg_data(nlh);

	memcpy(&map->id.daddr, &x->id.daddr, sizeof(map->id.daddr));
	map->id.spi = x->id.spi;
	map->id.family = x->props.family;
	map->id.proto = x->id.proto;
	memcpy(&map->old_saddr, &x->props.saddr, sizeof(map->old_saddr));
	memcpy(&map->new_saddr, new_saddr, sizeof(map->new_saddr));
	map->old_sport = x->encap->encap_sport;
	map->new_sport = new_sport;
	map->reqid = x->props.reqid;

	nlmsg_end(skb, nlh);
	return 0;
}
3968
/* Multicast a NAT-T mapping change to XFRMNLGRP_MAPPING listeners. */
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct sk_buff *skb;

	/* mapping changes only apply to UDP-encapsulated ESP states */
	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;
	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	BUG_ON(build_mapping(skb, x, ipaddr, sport) < 0);

	return xfrm_nlmsg_multicast(xs_net(x), skb, 0, XFRMNLGRP_MAPPING);
}
3991
xfrm_is_alive(const struct km_event * c)3992 static bool xfrm_is_alive(const struct km_event *c)
3993 {
3994 return (bool)xfrm_acquire_is_on(c->net);
3995 }
3996
/* Key-manager callbacks that expose kernel XFRM events to netlink
 * listeners; registered with xfrm_register_km() at module init.
 */
static struct xfrm_mgr netlink_mgr = {
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
	.new_mapping	= xfrm_send_mapping,
	.is_alive	= xfrm_is_alive,
};
4007
xfrm_user_net_init(struct net * net)4008 static int __net_init xfrm_user_net_init(struct net *net)
4009 {
4010 struct sock *nlsk;
4011 struct netlink_kernel_cfg cfg = {
4012 .groups = XFRMNLGRP_MAX,
4013 .input = xfrm_netlink_rcv,
4014 };
4015
4016 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
4017 if (nlsk == NULL)
4018 return -ENOMEM;
4019 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
4020 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
4021 return 0;
4022 }
4023
/* Hide the netlink socket from RCU lookups before teardown; the socket
 * itself is released later from xfrm_user_net_exit() via nlsk_stash.
 */
static void __net_exit xfrm_user_net_pre_exit(struct net *net)
{
	RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
}
4028
/* Batched netns teardown: release the kernel sockets stashed at init. */
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}
4036
/* Per-network-namespace lifecycle hooks for the XFRM netlink socket. */
static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.pre_exit   = xfrm_user_net_pre_exit,
	.exit_batch = xfrm_user_net_exit,
};
4042
xfrm_user_init(void)4043 static int __init xfrm_user_init(void)
4044 {
4045 int rv;
4046
4047 printk(KERN_INFO "Initializing XFRM netlink socket\n");
4048
4049 rv = register_pernet_subsys(&xfrm_user_net_ops);
4050 if (rv < 0)
4051 return rv;
4052 xfrm_register_km(&netlink_mgr);
4053 return 0;
4054 }
4055
/* Module unload: unregister the km callbacks first so no new events are
 * delivered, then tear down the per-netns netlink sockets.
 */
static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}
4061
/* Module registration and metadata. */
module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_DESCRIPTION("XFRM User interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);