// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
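
/* Actions normally execute by recursive calls to do_execute_actions()
 * (e.g. via sample() or recirculation).  Once the recursion depth passes
 * OVS_DEFERRED_ACTION_THRESHOLD, clone/recirc work that needs a fresh flow
 * key is instead queued on the per-CPU 'action_fifo' below and replayed by
 * process_deferred_actions() when the outermost call unwinds, which keeps
 * kernel stack usage bounded.
 */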
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.  The index is 'level - 1'
 * because 'exec_actions_level' has already been incremented by the time
 * any action executes.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

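/* Reserve a FIFO slot for a new deferred action.  Returns NULL when the
 * FIFO is full; note that 'head' stops one short of the array size, so at
 * most DEFERRED_ACTION_FIFO_SIZE - 1 entries are ever in flight.
 */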
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Queue a deferred action; returns the FIFO entry, or NULL if the per-CPU
 * FIFO is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	int err;

	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	err = skb_vlan_push(skb, vlan->vlan_tpid,
			    ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
	skb_reset_mac_len(skb);
	return err;
}

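/* OVS_MASKED(old, new, mask) keeps the bits of 'old' where 'mask' is clear
 * and takes the bits of 'new' where it is set; the masked-copy helpers
 * below rely on the new value having been pre-masked by the caller.
 */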
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

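/* TCP and UDP checksums cover an IP pseudo-header, so rewriting an address
 * must also patch the transport checksum.  Only the first fragment carries
 * an L4 header, hence the IP_OFFSET test below; a UDP checksum of zero
 * means "no checksum" on IPv4 and is left untouched unless the skb still
 * needs checksum completion (CHECKSUM_PARTIAL).
 */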
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 |  nh->flow_lbl[1] << 8 |  nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

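/* SCTP uses a CRC32c checksum, which cannot be updated incrementally the
 * way the Internet checksum can.  set_sctp() below recomputes the correct
 * CRC before and after the rewrite and applies the difference with XOR, so
 * a packet that arrived with a bad checksum still carries a
 * (correspondingly) bad one after the ports change.
 */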
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

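/* The fragmentation path below hands the IP stack a minimal on-stack dst so
 * that ip_do_fragment()/ipv6_fragment() can query the egress device MTU;
 * the DST_NOCOUNT/noref setup keeps the stack from taking references to a
 * stack object.
 */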
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

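/* Transmit 'skb' on 'out_port'.  Any truncation requested earlier
 * (OVS_CB(skb)->cutlen) is applied first, and packets that exceed the MRU
 * recorded at receive time are refragmented before transmission.
 */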
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport && netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		/* Need to set the pkt_type to involve the routing layer.  The
		 * packet movement through the OVS datapath doesn't generally
		 * use routing, but this is needed for tunnel cases.
		 */
		skb->pkt_type = PACKET_OUTGOING;

		if (likely(!mru ||
		           (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
				  ovs_dp_get_upcall_portid(dp,
							   smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

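/* Compute a flow hash for the OVS_ACTION_ATTR_HASH action and store it in
 * the flow key.  The result is folded with jhash_1word() over the caller
 * supplied basis and forced non-zero, which keeps the stored value
 * distinguishable from an unset hash.
 */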
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type.  NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* For masked set actions the netlink payload holds the key value followed
 * immediately by a mask of the same size, so the mask starts at the
 * midpoint of the attribute data.
 */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

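/* Recirculation re-runs the packet through the flow table with a fresh
 * recirc_id.  The flow key must be valid (re-extracted if an earlier action
 * invalidated it) because the lookup uses it as the match source.
 */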
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

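/* Decrement the IPv4 TTL or IPv6 hop limit in both the packet and the flow
 * key, fixing up the IPv4 header checksum.  Returns -EHOSTUNREACH once the
 * TTL would drop to zero, so the caller can run the nested
 * OVS_DEC_TTL_ATTR_ACTION list instead of forwarding.
 */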
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'.  In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
						     len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution does not
 * affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so it can be used directly.
	 * Otherwise, try to clone the key from the next recursion level of
	 * 'flow_keys'. If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else {  /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

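/* Drain the per-CPU deferred action FIFO.  Entries with an action list are
 * replayed through do_execute_actions(); entries without one are deferred
 * recirculations and go back through ovs_dp_process_packet().
 */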
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

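/* Allocate the per-CPU deferred-action FIFOs and flow-key scratch space.
 * Expected to be called once at module init, paired with
 * action_fifos_exit() on unload.
 */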
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}