// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs currently in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
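
/*
 * Hypothetical usage sketch (illustrative only, not part of this file):
 * a protocol module registering GRO callbacks for its ethertype. The
 * ethertype value and the foo_* callbacks are made-up names; real users
 * look like the ip_packet_offload registration in net/ipv4/af_inet.c.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),	hypothetical ethertype
 *		.priority = 0,			lower value sorts earlier
 *		.callbacks = {
 *			.gro_receive  = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 */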

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
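
/*
 * Hypothetical companion to the registration sketch above: unregistering
 * on module exit. Because dev_remove_offload() calls synchronize_net(),
 * it sleeps and must not be used from atomic context.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 */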

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page-pool-based packets with non-page-pool
	 * packets. This can result in reference count issues, as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have their
	 * frag count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
		     NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
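
/*
 * Hypothetical caller sketch (illustrative only): a protocol gro_receive
 * handler that has matched a held packet p tries to coalesce the new
 * segment into it. A nonzero return (e.g. -E2BIG) means the segment could
 * not be merged, so the handler returns p to make the GRO core flush it:
 *
 *	if (NAPI_GRO_CB(p)->same_flow && skb_gro_receive(p, skb))
 *		pp = p;		GRO core completes pp via napi_gro_complete()
 */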

int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise
		 * we are already on some slower path, so either skip all
		 * the infrequent tests altogether or avoid trying too hard
		 * to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && skb_frag_page(frag0) &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned accesses */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
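
/*
 * Hypothetical usage sketch: an encapsulation gro_receive handler
 * dispatching on the inner ethertype, in the style of eth_gro_receive().
 * These lookups walk an RCU-protected list; callers run in the GRO
 * receive path, which already holds rcu_read_lock().
 *
 *	ptype = gro_find_receive_by_type(inner_proto);
 *	if (!ptype)
 *		goto out_flush;
 *	pp = ptype->callbacks.gro_receive(head, skb);
 */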

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
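
/*
 * Hypothetical driver sketch: handing freshly built skbs to GRO from a
 * NAPI poll loop. priv and foo_build_skb() are made-up names; the
 * pattern itself is the common one for drivers using this API.
 *
 *	while (work_done < budget &&
 *	       (skb = foo_build_skb(priv)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, priv->netdev);
 *		napi_gro_receive(&priv->napi, skb);
 *		work_done++;
 *	}
 */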

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 * we copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
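
/*
 * Hypothetical driver sketch for the frags API: the driver attaches RX
 * pages directly to an skb obtained from napi_get_frags(), leaving the
 * ethernet header in the first fragment, and napi_gro_frags() pulls it
 * out via frag0. All names except the two API calls and
 * skb_add_rx_frag() are made up.
 *
 *	skb = napi_get_frags(&priv->napi);
 *	if (!skb)
 *		return;		out of memory, drop this buffer
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(&priv->napi);
 */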

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
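
/*
 * Hypothetical usage sketch: protocol gro_receive handlers normally reach
 * this function through the skb_gro_checksum_validate() helpers declared
 * in <net/gro.h>, e.g. validating a TCP checksum over an IPv4 pseudo
 * header before attempting to coalesce:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */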