// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

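/*
 * Allocate a single scratch buffer laid out as: caller data of @size
 * bytes (the saved IP header and, for ESN, the high sequence number
 * bits), followed by the ICV, the ahash request (aligned to the tfm
 * context) and an array of @nfrags scatterlist entries.  The
 * ah_tmp_*() and ah_req_sg() helpers below recover the pieces.
 */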
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash);

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int l = iph->ihl*4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr+optlen-4, 4);
			fallthrough;
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

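/*
 * Completion callback for asynchronous ICV computation on output:
 * copy the freshly computed ICV into the AH header, restore the
 * mutable IP header fields saved in the scratch buffer, then hand
 * the packet back to the xfrm output path.
 */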
static void ah_output_done(void *data, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb->sk, skb, err);
}

static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
	if (!iph)
		goto out;
	seqhi = (__be32 *)((char *)iph + ihl);
	icv = ah_tmp_icv(seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

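	/* RFC 4302: the Authentication Data field is zeroed while the
	 * ICV is computed over the packet.
	 */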
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

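	/* Save the mutable header fields (and any options) so they can
	 * be restored once the ICV has been computed over the zeroed
	 * header.
	 */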
	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

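	/* Zero the remaining mutable IPv4 fields (TOS, TTL, fragment
	 * offset, checksum) that are excluded from the ICV; tot_len is
	 * fixed up to cover the newly pushed AH header.
	 */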
	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

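	/* Payload Length is the size of AH in 32-bit words minus 2
	 * (RFC 4302), with the truncated ICV padded to a 32-bit
	 * (XFRM_STATE_ALIGN4) or the default 64-bit boundary.
	 */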
	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

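	/* An async transform may return -EINPROGRESS; ah_output_done()
	 * then finishes the packet once the digest is ready.
	 */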
	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -ENOSPC)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

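/*
 * Completion callback for asynchronous ICV verification on input:
 * compare the computed ICV against the received one in constant time,
 * restore the saved IP header, strip the AH header and resume xfrm
 * input processing with the inner protocol number.
 */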
static void ah_input_done(void *data, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

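	/* Accept an AH length matching either the truncated ICV
	 * mandated by RFC 4302 or the full digest, so peers that do
	 * not truncate still interoperate.
	 */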
	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	seqhi = (__be32 *)((char *)work_iph + ihl);
	auth_data = ah_tmp_auth(seqhi, seqhi_len);
	icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

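	/* Save the received header and ICV, then zero the
	 * Authentication Data and the mutable fields before
	 * recomputing the ICV over the packet (RFC 4302).
	 */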
	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

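	/* Pull the IP header back into the skb data so the digest is
	 * computed over the header, the AH header and the payload.
	 */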
	skb_push(skb, ihl);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

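	/* As on output, -EINPROGRESS means the transform completes
	 * asynchronously and ah_input_done() takes over.
	 */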
	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}

static int ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

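	/* Only fragmentation-needed and redirect messages are acted
	 * upon here; all other ICMP errors are ignored.
	 */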
	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_AH);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_AH);
	xfrm_state_put(x);

	return 0;
}

static int ah_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg) {
		NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
		goto error;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
		goto error;
	}

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

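	/* header_len is the AH header plus the truncated ICV, padded
	 * to the configured alignment; tunnel mode also accounts for
	 * the outer IP header.
	 */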
	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type ah_type =
{
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static struct xfrm4_protocol ah4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= ah4_rcv_cb,
	.err_handler	= ah4_err,
	.priority	= 0,
};

static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&ah_type, AF_INET);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_DESCRIPTION("IPv4 AH transformation library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);