// SPDX-License-Identifier: GPL-2.0+
/*
 *  IPv6 IOAM Lightweight Tunnel implementation
 *
 *  Author:
 *  Justin Iurman <justin.iurman@uliege.be>
 */
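
/*
 * Illustrative usage sketch (not authoritative; exact keywords depend on
 * the iproute2 version). An IOAM6 route in inline mode could look roughly
 * like:
 *
 *   ip -6 route add db02::/64 encap ioam6 mode inline \
 *          trace prealloc type 0x800000 ns 1 size 12 dev eth0
 */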

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/ioam6.h>
#include <linux/ioam6_iptunnel.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <net/ioam6.h>
#include <net/netlink.h>
#include <net/ipv6.h>
#include <net/dst_cache.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

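/* Trace-Type bits that select 4-octet ("short": bits 0-7 and 11) and
 * 8-octet ("wide": bits 8-10) node data fields, used to precompute the
 * per-node data length.
 */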
#define IOAM6_MASK_SHORT_FIELDS 0xff100000
#define IOAM6_MASK_WIDE_FIELDS 0xe00000

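/* Pre-built encapsulation header: a Hop-by-Hop Options header carrying a
 * PadN option (for 4n alignment of the trace data) followed by the IOAM
 * option with its Pre-allocated Trace header. It is copied verbatim right
 * after the (inner or outer) IPv6 header on transmit.
 */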
struct ioam6_lwt_encap {
	struct ipv6_hopopt_hdr eh;
	u8 pad[2];			/* 2-octet padding for 4n-alignment */
	struct ioam6_hdr ioamh;
	struct ioam6_trace_hdr traceh;
} __packed;

struct ioam6_lwt_freq {
	u32 k;
	u32 n;
};

struct ioam6_lwt {
	struct dst_cache cache;
	struct ioam6_lwt_freq freq;
	atomic_t pkt_cnt;
	u8 mode;
	bool has_tunsrc;
	struct in6_addr tunsrc;
	struct in6_addr tundst;
	struct ioam6_lwt_encap tuninfo;
};

static const struct netlink_range_validation freq_range = {
	.min = IOAM6_IPTUNNEL_FREQ_MIN,
	.max = IOAM6_IPTUNNEL_FREQ_MAX,
};

static struct ioam6_lwt *ioam6_lwt_state(struct lwtunnel_state *lwt)
{
	return (struct ioam6_lwt *)lwt->data;
}

static struct ioam6_lwt_encap *ioam6_lwt_info(struct lwtunnel_state *lwt)
{
	return &ioam6_lwt_state(lwt)->tuninfo;
}

static struct ioam6_trace_hdr *ioam6_lwt_trace(struct lwtunnel_state *lwt)
{
	return &(ioam6_lwt_state(lwt)->tuninfo.traceh);
}

static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
	[IOAM6_IPTUNNEL_FREQ_K] = NLA_POLICY_FULL_RANGE(NLA_U32, &freq_range),
	[IOAM6_IPTUNNEL_FREQ_N] = NLA_POLICY_FULL_RANGE(NLA_U32, &freq_range),
	[IOAM6_IPTUNNEL_MODE] = NLA_POLICY_RANGE(NLA_U8,
						 IOAM6_IPTUNNEL_MODE_MIN,
						 IOAM6_IPTUNNEL_MODE_MAX),
	[IOAM6_IPTUNNEL_SRC] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[IOAM6_IPTUNNEL_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(
					sizeof(struct ioam6_trace_hdr)),
};

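/* Reject empty traces, oversized pre-allocated data areas and trace types
 * that set undefined/unsupported bits (12-21 and 23), then precompute
 * nodelen, i.e. the per-node data length in 4-octet units.
 */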
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
{
	u32 fields;

	if (!trace->type_be32 || !trace->remlen ||
	    trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
	    trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
	    trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
	    trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
	    trace->type.bit21 | trace->type.bit23)
		return false;

	trace->nodelen = 0;
	fields = be32_to_cpu(trace->type_be32);

	trace->nodelen += hweight32(fields & IOAM6_MASK_SHORT_FIELDS)
			  * (sizeof(__be32) / 4);
	trace->nodelen += hweight32(fields & IOAM6_MASK_WIDE_FIELDS)
			  * (sizeof(__be64) / 4);

	return true;
}

static int ioam6_build_state(struct net *net, struct nlattr *nla,
			     unsigned int family, const void *cfg,
			     struct lwtunnel_state **ts,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1];
	struct ioam6_lwt_encap *tuninfo;
	struct ioam6_trace_hdr *trace;
	struct lwtunnel_state *lwt;
	struct ioam6_lwt *ilwt;
	int len_aligned, err;
	u32 freq_k, freq_n;
	u8 mode;

	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested(tb, IOAM6_IPTUNNEL_MAX, nla,
			       ioam6_iptunnel_policy, extack);
	if (err < 0)
		return err;

	if ((!tb[IOAM6_IPTUNNEL_FREQ_K] && tb[IOAM6_IPTUNNEL_FREQ_N]) ||
	    (tb[IOAM6_IPTUNNEL_FREQ_K] && !tb[IOAM6_IPTUNNEL_FREQ_N])) {
		NL_SET_ERR_MSG(extack, "freq: missing parameter");
		return -EINVAL;
	} else if (!tb[IOAM6_IPTUNNEL_FREQ_K] && !tb[IOAM6_IPTUNNEL_FREQ_N]) {
		freq_k = IOAM6_IPTUNNEL_FREQ_MIN;
		freq_n = IOAM6_IPTUNNEL_FREQ_MIN;
	} else {
		freq_k = nla_get_u32(tb[IOAM6_IPTUNNEL_FREQ_K]);
		freq_n = nla_get_u32(tb[IOAM6_IPTUNNEL_FREQ_N]);

		if (freq_k > freq_n) {
			NL_SET_ERR_MSG(extack, "freq: k > n is forbidden");
			return -EINVAL;
		}
	}

	if (!tb[IOAM6_IPTUNNEL_MODE])
		mode = IOAM6_IPTUNNEL_MODE_INLINE;
	else
		mode = nla_get_u8(tb[IOAM6_IPTUNNEL_MODE]);

	if (tb[IOAM6_IPTUNNEL_SRC] && mode == IOAM6_IPTUNNEL_MODE_INLINE) {
		NL_SET_ERR_MSG(extack, "no tunnel src expected with this mode");
		return -EINVAL;
	}

	if (!tb[IOAM6_IPTUNNEL_DST] && mode != IOAM6_IPTUNNEL_MODE_INLINE) {
		NL_SET_ERR_MSG(extack, "this mode needs a tunnel destination");
		return -EINVAL;
	}

	if (!tb[IOAM6_IPTUNNEL_TRACE]) {
		NL_SET_ERR_MSG(extack, "missing trace");
		return -EINVAL;
	}

	trace = nla_data(tb[IOAM6_IPTUNNEL_TRACE]);
	if (!ioam6_validate_trace_hdr(trace)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_TRACE],
				    "invalid trace validation");
		return -EINVAL;
	}

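	/* The pre-allocated trace data area is rounded up to a multiple of
	 * 8 octets so that the Hop-by-Hop header length (expressed in
	 * 8-octet units, excluding the first 8 octets) is exact.
	 */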
	len_aligned = ALIGN(trace->remlen * 4, 8);
	lwt = lwtunnel_state_alloc(sizeof(*ilwt) + len_aligned);
	if (!lwt)
		return -ENOMEM;

	ilwt = ioam6_lwt_state(lwt);
	err = dst_cache_init(&ilwt->cache, GFP_ATOMIC);
	if (err)
		goto free_lwt;

	atomic_set(&ilwt->pkt_cnt, 0);
	ilwt->freq.k = freq_k;
	ilwt->freq.n = freq_n;

	ilwt->mode = mode;

	if (!tb[IOAM6_IPTUNNEL_SRC]) {
		ilwt->has_tunsrc = false;
	} else {
		ilwt->has_tunsrc = true;
		ilwt->tunsrc = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_SRC]);

		if (ipv6_addr_any(&ilwt->tunsrc)) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_SRC],
					    "invalid tunnel source address");
			err = -EINVAL;
			goto free_cache;
		}
	}

	if (tb[IOAM6_IPTUNNEL_DST]) {
		ilwt->tundst = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_DST]);

		if (ipv6_addr_any(&ilwt->tundst)) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_DST],
					    "invalid tunnel dest address");
			err = -EINVAL;
			goto free_cache;
		}
	}

	tuninfo = ioam6_lwt_info(lwt);
	tuninfo->eh.hdrlen = ((sizeof(*tuninfo) + len_aligned) >> 3) - 1;
	tuninfo->pad[0] = IPV6_TLV_PADN;
	tuninfo->ioamh.type = IOAM6_TYPE_PREALLOC;
	tuninfo->ioamh.opt_type = IPV6_TLV_IOAM;
	tuninfo->ioamh.opt_len = sizeof(tuninfo->ioamh) - 2 + sizeof(*trace)
				 + trace->remlen * 4;

	memcpy(&tuninfo->traceh, trace, sizeof(*trace));

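	/* If alignment added trailing bytes after the trace data, encode
	 * them as a PadN option (type + length, zero-filled payload).
	 */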
	if (len_aligned - trace->remlen * 4) {
		tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PADN;
		tuninfo->traceh.data[trace->remlen * 4 + 1] = 2;
	}

	lwt->type = LWTUNNEL_ENCAP_IOAM6;
	lwt->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	*ts = lwt;

	return 0;
free_cache:
	dst_cache_destroy(&ilwt->cache);
free_lwt:
	kfree(lwt);
	return err;
}

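/* Fill the pre-allocated trace data of an already-inserted IOAM option.
 * The trace header sits right after the Hop-by-Hop header (2 octets), the
 * 2-octet PadN option and the IOAM option header.
 */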
static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
{
	struct ioam6_trace_hdr *trace;
	struct ioam6_namespace *ns;

	trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb)
					   + sizeof(struct ipv6_hopopt_hdr) + 2
					   + sizeof(struct ioam6_hdr));

	ns = ioam6_namespace(net, trace->namespace_id);
	if (ns)
		ioam6_fill_trace_data(skb, ns, trace, false);

	return 0;
}

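/* Inline mode: insert the pre-built Hop-by-Hop header right after the
 * existing IPv6 header, then fill the trace data.
 */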
static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
			   struct ioam6_lwt_encap *tuninfo)
{
	struct ipv6hdr *oldhdr, *hdr;
	int hdrlen, err;

	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;

	err = skb_cow_head(skb, hdrlen + skb->mac_len);
	if (unlikely(err))
		return err;

	oldhdr = ipv6_hdr(skb);
	skb_pull(skb, sizeof(*oldhdr));
	skb_postpull_rcsum(skb, skb_network_header(skb), sizeof(*oldhdr));

	skb_push(skb, sizeof(*oldhdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);
	memmove(hdr, oldhdr, sizeof(*oldhdr));
	tuninfo->eh.nexthdr = hdr->nexthdr;

	skb_set_transport_header(skb, sizeof(*hdr));
	skb_postpush_rcsum(skb, hdr, sizeof(*hdr) + hdrlen);

	memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);

	hdr->nexthdr = NEXTHDR_HOP;
	hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));

	return ioam6_do_fill(net, skb);
}

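/* Encap mode: prepend an outer IPv6 header (ip6ip6) carrying the pre-built
 * Hop-by-Hop header, using the configured tunnel source address or, if
 * none, a source address selected from the output device.
 */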
static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
			  struct ioam6_lwt_encap *tuninfo,
			  bool has_tunsrc,
			  struct in6_addr *tunsrc,
			  struct in6_addr *tundst)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr, *inner_hdr;
	int hdrlen, len, err;

	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
	len = sizeof(*hdr) + hdrlen;

	err = skb_cow_head(skb, len + skb->mac_len);
	if (unlikely(err))
		return err;

	inner_hdr = ipv6_hdr(skb);

	skb_push(skb, len);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	skb_set_transport_header(skb, sizeof(*hdr));

	tuninfo->eh.nexthdr = NEXTHDR_IPV6;
	memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);

	hdr = ipv6_hdr(skb);
	memcpy(hdr, inner_hdr, sizeof(*hdr));

	hdr->nexthdr = NEXTHDR_HOP;
	hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));
	hdr->daddr = *tundst;

	if (has_tunsrc)
		memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc));
	else
		ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
				   IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);

	skb_postpush_rcsum(skb, hdr, len);

	return ioam6_do_fill(net, skb);
}

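/* Output hook: apply the "k over n" insertion frequency, run the configured
 * mode (inline, encap, or auto), then re-route the packet, via the dst
 * cache, whenever encapsulation changed the destination address.
 */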
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct in6_addr orig_daddr;
	struct ioam6_lwt *ilwt;
	int err = -EINVAL;
	u32 pkt_cnt;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto drop;

	ilwt = ioam6_lwt_state(dst->lwtstate);

	/* Check for insertion frequency (i.e., "k over n" insertions) */
	pkt_cnt = atomic_fetch_inc(&ilwt->pkt_cnt);
	if (pkt_cnt % ilwt->freq.n >= ilwt->freq.k)
		goto out;

	orig_daddr = ipv6_hdr(skb)->daddr;

	switch (ilwt->mode) {
	case IOAM6_IPTUNNEL_MODE_INLINE:
do_inline:
		/* Direct insertion - if there is no Hop-by-Hop yet */
		if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
			goto out;

		err = ioam6_do_inline(net, skb, &ilwt->tuninfo);
		if (unlikely(err))
			goto drop;

		break;
	case IOAM6_IPTUNNEL_MODE_ENCAP:
do_encap:
		/* Encapsulation (ip6ip6) */
		err = ioam6_do_encap(net, skb, &ilwt->tuninfo,
				     ilwt->has_tunsrc, &ilwt->tunsrc,
				     &ilwt->tundst);
		if (unlikely(err))
			goto drop;

		break;
	case IOAM6_IPTUNNEL_MODE_AUTO:
		/* Automatic (RFC8200 compliant):
		 * - local packets -> INLINE mode
		 * - in-transit packets -> ENCAP mode
		 */
		if (!skb->dev)
			goto do_inline;

		goto do_encap;
	default:
		goto drop;
	}

	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto drop;

	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
		local_bh_disable();
		dst = dst_cache_get(&ilwt->cache);
		local_bh_enable();

		if (unlikely(!dst)) {
			struct ipv6hdr *hdr = ipv6_hdr(skb);
			struct flowi6 fl6;

			memset(&fl6, 0, sizeof(fl6));
			fl6.daddr = hdr->daddr;
			fl6.saddr = hdr->saddr;
			fl6.flowlabel = ip6_flowinfo(hdr);
			fl6.flowi6_mark = skb->mark;
			fl6.flowi6_proto = hdr->nexthdr;

			dst = ip6_route_output(net, NULL, &fl6);
			if (dst->error) {
				err = dst->error;
				dst_release(dst);
				goto drop;
			}

			local_bh_disable();
			dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
			local_bh_enable();
		}

		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		return dst_output(net, sk, skb);
	}
out:
	return dst->lwtstate->orig_output(net, sk, skb);
drop:
	kfree_skb(skb);
	return err;
}

static void ioam6_destroy_state(struct lwtunnel_state *lwt)
{
	dst_cache_destroy(&ioam6_lwt_state(lwt)->cache);
}

static int ioam6_fill_encap_info(struct sk_buff *skb,
				 struct lwtunnel_state *lwtstate)
{
	struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate);
	int err;

	err = nla_put_u32(skb, IOAM6_IPTUNNEL_FREQ_K, ilwt->freq.k);
	if (err)
		goto ret;

	err = nla_put_u32(skb, IOAM6_IPTUNNEL_FREQ_N, ilwt->freq.n);
	if (err)
		goto ret;

	err = nla_put_u8(skb, IOAM6_IPTUNNEL_MODE, ilwt->mode);
	if (err)
		goto ret;

	if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
		if (ilwt->has_tunsrc) {
			err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_SRC,
					       &ilwt->tunsrc);
			if (err)
				goto ret;
		}

		err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_DST, &ilwt->tundst);
		if (err)
			goto ret;
	}

	err = nla_put(skb, IOAM6_IPTUNNEL_TRACE, sizeof(ilwt->tuninfo.traceh),
		      &ilwt->tuninfo.traceh);
ret:
	return err;
}

static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate);
	int nlsize;

	nlsize = nla_total_size(sizeof(ilwt->freq.k)) +
		 nla_total_size(sizeof(ilwt->freq.n)) +
		 nla_total_size(sizeof(ilwt->mode)) +
		 nla_total_size(sizeof(ilwt->tuninfo.traceh));

	if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
		if (ilwt->has_tunsrc)
			nlsize += nla_total_size(sizeof(ilwt->tunsrc));

		nlsize += nla_total_size(sizeof(ilwt->tundst));
	}

	return nlsize;
}

static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct ioam6_trace_hdr *trace_a = ioam6_lwt_trace(a);
	struct ioam6_trace_hdr *trace_b = ioam6_lwt_trace(b);
	struct ioam6_lwt *ilwt_a = ioam6_lwt_state(a);
	struct ioam6_lwt *ilwt_b = ioam6_lwt_state(b);

	return (ilwt_a->freq.k != ilwt_b->freq.k ||
		ilwt_a->freq.n != ilwt_b->freq.n ||
		ilwt_a->mode != ilwt_b->mode ||
		ilwt_a->has_tunsrc != ilwt_b->has_tunsrc ||
		(ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
		 !ipv6_addr_equal(&ilwt_a->tundst, &ilwt_b->tundst)) ||
		(ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
		 ilwt_a->has_tunsrc &&
		 !ipv6_addr_equal(&ilwt_a->tunsrc, &ilwt_b->tunsrc)) ||
		trace_a->namespace_id != trace_b->namespace_id);
}

static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
	.build_state = ioam6_build_state,
	.destroy_state = ioam6_destroy_state,
	.output = ioam6_output,
	.fill_encap = ioam6_fill_encap_info,
	.get_encap_size = ioam6_encap_nlsize,
	.cmp_encap = ioam6_encap_cmp,
	.owner = THIS_MODULE,
};

int __init ioam6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}

void ioam6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}