Lines Matching +full:mss +full:- +full:supply (matches shown from net/core/skbuff.c, Linux kernel source)

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Florian La Roche <rzsfl@rz.uni-sb.de>
19 * Ray VanTassle : Fixed --skb->lock in free
150 * drop_reasons_register_subsys - register another drop reason subsystem
169 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
188 * skb_panic - private function for out-of-line support
194 * Out-of-line support for skb_put() and skb_push().
203 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
204 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
205 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
231 * page - to avoid excessive truesize underestimation
245 offset = nc->offset - SZ_1K; in page_frag_alloc_1k()
253 nc->va = page_address(page); in page_frag_alloc_1k()
254 nc->pfmemalloc = page_is_pfmemalloc(page); in page_frag_alloc_1k()
255 offset = PAGE_SIZE - SZ_1K; in page_frag_alloc_1k()
259 nc->offset = offset; in page_frag_alloc_1k()
260 return nc->va + offset; in page_frag_alloc_1k()
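
The page_frag_alloc_1k() lines above show the small-skb path carving fixed 1 KB slices out of a single page, walking the offset downward (offset = nc->offset - SZ_1K) and grabbing a fresh page starting at PAGE_SIZE - SZ_1K once the offset would go negative. Below is a minimal userspace sketch of that carving strategy; the names, the malloc-backed "page" and the missing page refcounting are simplifications of my own, not kernel code.

```c
/* Toy model of a "carve 1 KB slices from the end of a page" allocator.
 * Illustration only: names are invented, the page is plain malloc memory.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096
#define TOY_FRAG_SIZE 1024

struct toy_frag_cache {
    char *va;       /* base address of the current page */
    int   offset;   /* offset of the most recently handed-out slice */
};

static void *toy_frag_alloc_1k(struct toy_frag_cache *nc)
{
    int offset = nc->offset - TOY_FRAG_SIZE;    /* next slice, moving down */

    if (!nc->va || offset < 0) {
        /* no page yet, or the page is exhausted: grab a fresh one */
        nc->va = malloc(TOY_PAGE_SIZE);
        if (!nc->va)
            return NULL;
        offset = TOY_PAGE_SIZE - TOY_FRAG_SIZE; /* start carving at the top */
    }

    nc->offset = offset;
    return nc->va + offset;
}

int main(void)
{
    struct toy_frag_cache nc = { .va = NULL, .offset = 0 };

    for (int i = 0; i < 6; i++) {
        void *p = toy_frag_alloc_1k(&nc);
        printf("slice %d at offset %d\n", i, nc.offset);
        (void)p;
    }
    /* old pages are leaked here on purpose; the real cache refcounts them */
    return 0;
}
```
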
294 * skb->head being backed by slab, not a page fragment.
296 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
305 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); in napi_get_frags_check()
318 data = __page_frag_alloc_align(&nc->page, fragsz, in __napi_alloc_frag_align()
352 if (unlikely(!nc->skb_count)) { in napi_skb_cache_get()
353 nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, in napi_skb_cache_get()
356 nc->skb_cache); in napi_skb_cache_get()
357 if (unlikely(!nc->skb_count)) { in napi_skb_cache_get()
363 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
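
napi_skb_cache_get() above pops an skb pointer from a small cache array and, when the array is empty, refills it with one bulk allocation (kmem_cache_alloc_bulk) instead of many single ones. A rough userspace analogue of that refill-in-bulk-then-pop pattern follows; the cache size, the stand-in bulk allocator and every name are invented for the illustration.

```c
/* Toy "bulk refill" object cache, modelled loosely on the NAPI skb cache.
 * Everything here (names, sizes, the bulk allocator) is illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_CACHE_SIZE 64
#define TOY_BULK       32  /* how many objects one refill grabs */

struct toy_cache {
    void *objs[TOY_CACHE_SIZE];
    unsigned int count;
};

/* stand-in bulk allocator: returns how many objects it managed to fill */
static unsigned int toy_alloc_bulk(unsigned int n, void **out)
{
    unsigned int i;

    for (i = 0; i < n; i++) {
        out[i] = malloc(64);
        if (!out[i])
            break;
    }
    return i;
}

static void *toy_cache_get(struct toy_cache *nc)
{
    if (!nc->count) {
        nc->count = toy_alloc_bulk(TOY_BULK, nc->objs);
        if (!nc->count)
            return NULL;            /* bulk refill failed entirely */
    }
    return nc->objs[--nc->count];   /* pop the most recently cached object */
}

int main(void)
{
    struct toy_cache nc = { .count = 0 };
    void *o = toy_cache_get(&nc);

    printf("got %p, %u still cached\n", o, nc.count);
    free(o);
    /* remaining cached objects are leaked in this toy; a real cache flushes them */
    return 0;
}
```
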
375 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in __finalize_skb_around()
378 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
379 refcount_set(&skb->users, 1); in __finalize_skb_around()
380 skb->head = data; in __finalize_skb_around()
381 skb->data = data; in __finalize_skb_around()
384 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
385 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
386 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
390 atomic_set(&shinfo->dataref, 1); in __finalize_skb_around()
452 * __build_skb - build a network buffer
487 * takes care of skb->head and skb->pfmemalloc
494 skb->head_frag = 1; in build_skb()
502 * build_skb_around - build a network buffer around provided skb
516 skb->head_frag = 1; in build_skb_around()
524 * __napi_build_skb - build a network buffer
548 * napi_build_skb - build a network buffer
552 * Version of __napi_build_skb() that takes care of skb->head_frag
553 * and skb->pfmemalloc when the data is a page or page fragment.
562 skb->head_frag = 1; in napi_build_skb()
600 /* The following cast might truncate high-order bits of obj_size, this in kmalloc_reserve()
633 * __alloc_skb - allocate a network buffer
676 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
694 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
701 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
702 refcount_set(&fclones->fclone_ref, 1); in __alloc_skb()
714 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
737 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
756 pfmemalloc = nc->pfmemalloc; in __netdev_alloc_skb()
763 pfmemalloc = nc->pfmemalloc; in __netdev_alloc_skb()
779 skb->pfmemalloc = 1; in __netdev_alloc_skb()
780 skb->head_frag = 1; in __netdev_alloc_skb()
784 skb->dev = dev; in __netdev_alloc_skb()
792 * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
799 * CPU cycles by avoiding having to disable and re-enable IRQs.
815 * we use kmalloc() for skb->head allocation. in napi_alloc_skb()
837 * - 'len' less than GRO_MAX_HEAD makes little sense in napi_alloc_skb()
838 * - On most systems, larger 'len' values lead to fragment in napi_alloc_skb()
840 * - kmalloc would use the kmalloc-1k slab for such values in napi_alloc_skb()
841 * - Builds with smaller GRO_MAX_HEAD will very likely do in napi_alloc_skb()
847 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); in napi_alloc_skb()
848 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); in napi_alloc_skb()
852 data = page_frag_alloc(&nc->page, len, gfp_mask); in napi_alloc_skb()
853 pfmemalloc = nc->page.pfmemalloc; in napi_alloc_skb()
867 skb->pfmemalloc = 1; in napi_alloc_skb()
868 skb->head_frag = 1; in napi_alloc_skb()
872 skb->dev = napi->dev; in napi_alloc_skb()
885 skb->len += size; in skb_add_rx_frag_netmem()
886 skb->data_len += size; in skb_add_rx_frag_netmem()
887 skb->truesize += truesize; in skb_add_rx_frag_netmem()
894 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
899 skb->len += size; in skb_coalesce_rx_frag()
900 skb->data_len += size; in skb_coalesce_rx_frag()
901 skb->truesize += truesize; in skb_coalesce_rx_frag()
913 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
942 return -EOPNOTSUPP; in skb_pp_cow_data()
944 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); in skb_pp_cow_data()
945 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) in skb_pp_cow_data()
946 return -ENOMEM; in skb_pp_cow_data()
948 size = min_t(u32, skb->len, max_head_size); in skb_pp_cow_data()
952 return -ENOMEM; in skb_pp_cow_data()
957 return -ENOMEM; in skb_pp_cow_data()
964 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
971 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
975 len = skb->len - off; in skb_pp_cow_data()
976 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in skb_pp_cow_data()
986 return -ENOMEM; in skb_pp_cow_data()
997 len -= size; in skb_pp_cow_data()
1006 return -EOPNOTSUPP; in skb_pp_cow_data()
1014 if (!prog->aux->xdp_has_frags) in skb_cow_data_for_xdp()
1015 return -EINVAL; in skb_cow_data_for_xdp()
1026 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation in napi_pp_put_page()
1045 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
1051 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
1056 * i.e. when skb->pp_recycle is true, and not for fragments in a
1057 * non-pp-recycling skb. It has a fallback to increase references on normal
1066 if (!skb->pp_recycle) in skb_pp_frag_ref()
1067 return -EINVAL; in skb_pp_frag_ref()
1071 for (i = 0; i < shinfo->nr_frags; i++) { in skb_pp_frag_ref()
1072 head_netmem = netmem_compound_head(shinfo->frags[i].netmem); in skb_pp_frag_ref()
1091 unsigned char *head = skb->head; in skb_free_head()
1093 if (skb->head_frag) { in skb_free_head()
1111 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS; in skb_release_data()
1118 for (i = 0; i < shinfo->nr_frags; i++) in skb_release_data()
1119 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
1122 if (shinfo->frag_list) in skb_release_data()
1123 kfree_skb_list_reason(shinfo->frag_list, reason); in skb_release_data()
1136 skb->pp_recycle = 0; in skb_release_data()
1146 switch (skb->fclone) { in kfree_skbmem()
1158 if (refcount_read(&fclones->fclone_ref) == 1) in kfree_skbmem()
1166 if (!refcount_dec_and_test(&fclones->fclone_ref)) in kfree_skbmem()
1175 if (skb->destructor) { in skb_release_head_state()
1177 skb->destructor(skb); in skb_release_head_state()
1189 if (likely(skb->head)) in skb_release_all()
1194 * __kfree_skb - private function
1229 * sk_skb_reason_drop - free an sk_buff with special reason
1258 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1264 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1266 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { in kfree_skb_add_bulk()
1268 sa->skb_array); in kfree_skb_add_bulk()
1269 sa->skb_count = 0; in kfree_skb_add_bulk()
1281 struct sk_buff *next = segs->next; in kfree_skb_list_reason()
1298 * Must only be called from net_ratelimit()-ed paths.
1305 struct net_device *dev = skb->dev; in skb_dump()
1306 struct sock *sk = skb->sk; in skb_dump()
1313 len = skb->len; in skb_dump()
1315 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1330 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
1331 has_mac ? skb->mac_header : -1, in skb_dump()
1332 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1333 skb->mac_len, in skb_dump()
1334 skb->network_header, in skb_dump()
1335 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1336 has_trans ? skb->transport_header : -1, in skb_dump()
1337 sh->tx_flags, sh->nr_frags, in skb_dump()
1338 sh->gso_size, sh->gso_type, sh->gso_segs, in skb_dump()
1339 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1340 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, in skb_dump()
1341 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1342 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1343 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
1344 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, in skb_dump()
1345 skb->inner_network_header, skb->inner_transport_header); in skb_dump()
1349 level, dev->name, &dev->features); in skb_dump()
1352 level, sk->sk_family, sk->sk_type, sk->sk_protocol); in skb_dump()
1356 16, 1, skb->head, headroom, false); in skb_dump()
1361 16, 1, skb->data, seg_len, false); in skb_dump()
1362 len -= seg_len; in skb_dump()
1368 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1369 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1376 len -= skb_frag_size(frag); in skb_dump()
1391 len -= seg_len; in skb_dump()
1406 * skb_tx_error - report an sk_buff xmit error
1423 * consume_skb - free an skbuff
1442 * __consume_stateless_skb - free an skbuff, assuming it is stateless
1464 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1466 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { in napi_skb_cache_put()
1468 kasan_mempool_unpoison_object(nc->skb_cache[i], in napi_skb_cache_put()
1472 nc->skb_cache + NAPI_SKB_CACHE_HALF); in napi_skb_cache_put()
1473 nc->skb_count = NAPI_SKB_CACHE_HALF; in napi_skb_cache_put()
1486 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1491 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1498 /* Zero budget indicate non-NAPI context called us, like netpoll */ in napi_consume_skb()
1513 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1530 new->tstamp = old->tstamp; in __copy_skb_header()
1531 /* We do not copy old->sk */ in __copy_skb_header()
1532 new->dev = old->dev; in __copy_skb_header()
1533 memcpy(new->cb, old->cb, sizeof(old->cb)); in __copy_skb_header()
1541 new->queue_mapping = old->queue_mapping; in __copy_skb_header()
1543 memcpy(&new->headers, &old->headers, sizeof(new->headers)); in __copy_skb_header()
1581 #define C(x) n->x = skb->x in __skb_clone()
1583 n->next = n->prev = NULL; in __skb_clone()
1584 n->sk = NULL; in __skb_clone()
1590 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1591 n->cloned = 1; in __skb_clone()
1592 n->nohdr = 0; in __skb_clone()
1593 n->peeked = 0; in __skb_clone()
1596 n->destructor = NULL; in __skb_clone()
1603 refcount_set(&n->users, 1); in __skb_clone()
1605 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1606 skb->cloned = 1; in __skb_clone()
1613 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1624 n->len = first->len; in alloc_skb_for_msg()
1625 n->data_len = first->len; in alloc_skb_for_msg()
1626 n->truesize = first->truesize; in alloc_skb_for_msg()
1628 skb_shinfo(n)->frag_list = first; in alloc_skb_for_msg()
1631 n->destructor = NULL; in alloc_skb_for_msg()
1638 * skb_morph - morph one skb into another
1640 * @src: the skb to supply the contents
1668 user = mmp->user ? : current_user(); in mm_account_pinned_pages()
1670 old_pg = atomic_long_read(&user->locked_vm); in mm_account_pinned_pages()
1674 return -ENOBUFS; in mm_account_pinned_pages()
1675 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); in mm_account_pinned_pages()
1677 if (!mmp->user) { in mm_account_pinned_pages()
1678 mmp->user = get_uid(user); in mm_account_pinned_pages()
1679 mmp->num_pg = num_pg; in mm_account_pinned_pages()
1681 mmp->num_pg += num_pg; in mm_account_pinned_pages()
1690 if (mmp->user) { in mm_unaccount_pinned_pages()
1691 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); in mm_unaccount_pinned_pages()
1692 free_uid(mmp->user); in mm_unaccount_pinned_pages()
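
mm_account_pinned_pages() above charges the pinned pages against user->locked_vm with a read/compute/compare-exchange loop, so concurrent callers cannot push the counter past the limit. A standalone sketch of that pattern using C11 atomics is below; the limit value and all names are made up, and the uid refcounting is left out.

```c
/* Toy version of the "charge pages against a limit with cmpxchg" loop
 * used by mm_account_pinned_pages(). Names and the limit are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

#define TOY_LOCKED_LIMIT 1024      /* pretend rlimit, in pages */

static atomic_long toy_locked_vm;  /* shared counter, like user->locked_vm */

static int toy_account_pages(long num_pg)
{
    long old_pg = atomic_load(&toy_locked_vm);
    long new_pg;

    do {
        new_pg = old_pg + num_pg;
        if (new_pg > TOY_LOCKED_LIMIT)
            return -1;  /* over the limit: refuse, charge nothing */
    } while (!atomic_compare_exchange_weak(&toy_locked_vm, &old_pg, new_pg));

    return 0;
}

static void toy_unaccount_pages(long num_pg)
{
    atomic_fetch_sub(&toy_locked_vm, num_pg);
}

int main(void)
{
    printf("charge 1000: %d\n", toy_account_pages(1000)); /* fits under 1024 */
    printf("charge 100:  %d\n", toy_account_pages(100));  /* would exceed the limit */
    toy_unaccount_pages(1000);
    printf("charge 100:  %d\n", toy_account_pages(100));  /* fits again */
    return 0;
}
```
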
1708 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1709 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1710 uarg->mmp.user = NULL; in msg_zerocopy_alloc()
1712 if (mm_account_pinned_pages(&uarg->mmp, size)) { in msg_zerocopy_alloc()
1717 uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; in msg_zerocopy_alloc()
1718 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; in msg_zerocopy_alloc()
1719 uarg->len = 1; in msg_zerocopy_alloc()
1720 uarg->bytelen = size; in msg_zerocopy_alloc()
1721 uarg->zerocopy = 1; in msg_zerocopy_alloc()
1722 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; in msg_zerocopy_alloc()
1723 refcount_set(&uarg->ubuf.refcnt, 1); in msg_zerocopy_alloc()
1726 return &uarg->ubuf; in msg_zerocopy_alloc()
1743 if (uarg->ops != &msg_zerocopy_ubuf_ops) in msg_zerocopy_realloc()
1747 * so uarg->len and sk_zckey access is serialized in msg_zerocopy_realloc()
1755 bytelen = uarg_zc->bytelen + size; in msg_zerocopy_realloc()
1756 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { in msg_zerocopy_realloc()
1758 if (sk->sk_type == SOCK_STREAM) in msg_zerocopy_realloc()
1763 next = (u32)atomic_read(&sk->sk_zckey); in msg_zerocopy_realloc()
1764 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { in msg_zerocopy_realloc()
1765 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) in msg_zerocopy_realloc()
1767 uarg_zc->len++; in msg_zerocopy_realloc()
1768 uarg_zc->bytelen = bytelen; in msg_zerocopy_realloc()
1769 atomic_set(&sk->sk_zckey, ++next); in msg_zerocopy_realloc()
1772 if (sk->sk_type == SOCK_STREAM) in msg_zerocopy_realloc()
1790 old_lo = serr->ee.ee_info; in skb_zerocopy_notify_extend()
1791 old_hi = serr->ee.ee_data; in skb_zerocopy_notify_extend()
1792 sum_len = old_hi - old_lo + 1ULL + len; in skb_zerocopy_notify_extend()
1800 serr->ee.ee_data += len; in skb_zerocopy_notify_extend()
1808 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1815 mm_unaccount_pinned_pages(&uarg->mmp); in __msg_zerocopy_callback()
1820 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) in __msg_zerocopy_callback()
1823 len = uarg->len; in __msg_zerocopy_callback()
1824 lo = uarg->id; in __msg_zerocopy_callback()
1825 hi = uarg->id + len - 1; in __msg_zerocopy_callback()
1826 is_zerocopy = uarg->zerocopy; in __msg_zerocopy_callback()
1830 serr->ee.ee_errno = 0; in __msg_zerocopy_callback()
1831 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; in __msg_zerocopy_callback()
1832 serr->ee.ee_data = hi; in __msg_zerocopy_callback()
1833 serr->ee.ee_info = lo; in __msg_zerocopy_callback()
1835 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; in __msg_zerocopy_callback()
1837 q = &sk->sk_error_queue; in __msg_zerocopy_callback()
1838 spin_lock_irqsave(&q->lock, flags); in __msg_zerocopy_callback()
1840 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || in __msg_zerocopy_callback()
1845 spin_unlock_irqrestore(&q->lock, flags); in __msg_zerocopy_callback()
1859 uarg_zc->zerocopy = uarg_zc->zerocopy & success; in msg_zerocopy_complete()
1861 if (refcount_dec_and_test(&uarg->refcnt)) in msg_zerocopy_complete()
1867 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; in msg_zerocopy_put_abort()
1869 atomic_dec(&sk->sk_zckey); in msg_zerocopy_put_abort()
1870 uarg_to_msgzc(uarg)->len--; in msg_zerocopy_put_abort()
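
In the MSG_ZEROCOPY fragments above, each completion notification describes an inclusive range of send ids, lo = uarg->id and hi = uarg->id + len - 1, carried in ee_info/ee_data, and skb_zerocopy_notify_extend() grows an already-queued range when new completions are contiguous instead of queueing another error-queue entry. The toy below models only that range bookkeeping; there is no socket or error queue, and the wrap/overflow checks are omitted.

```c
/* Toy model of MSG_ZEROCOPY completion ranges: each notification covers the
 * inclusive id range [lo, hi], and contiguous completions are coalesced by
 * extending hi. Illustrative bookkeeping only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_zc_notify {
    uint32_t lo, hi;   /* corresponds to ee_info / ee_data in the real code */
    bool valid;
};

/* try to extend an existing notification by 'len' more completed sends */
static bool toy_notify_extend(struct toy_zc_notify *n, uint32_t lo, uint16_t len)
{
    if (!n->valid || n->hi + 1 != lo)  /* not contiguous: needs a new entry */
        return false;
    n->hi += len;
    return true;
}

static void toy_complete(struct toy_zc_notify *n, uint32_t id, uint16_t len)
{
    uint32_t lo = id, hi = id + len - 1;

    if (toy_notify_extend(n, lo, len))
        return;
    n->lo = lo;        /* start a fresh notification */
    n->hi = hi;
    n->valid = true;
}

int main(void)
{
    struct toy_zc_notify n = { .valid = false };

    toy_complete(&n, 0, 3);   /* sends 0..2 complete  */
    toy_complete(&n, 3, 2);   /* sends 3..4: coalesced */
    printf("pending notification covers [%u, %u]\n",
           (unsigned)n.lo, (unsigned)n.hi);
    return 0;
}
```
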
1886 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1888 if (uarg->ops->link_skb) { in skb_zerocopy_iter_stream()
1889 err = uarg->ops->link_skb(skb, uarg); in skb_zerocopy_iter_stream()
1900 return -EEXIST; in skb_zerocopy_iter_stream()
1903 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1904 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1905 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1908 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1909 skb->sk = sk; in skb_zerocopy_iter_stream()
1911 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1916 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1924 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1925 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1938 return -ENOMEM; in skb_zerocopy_clone()
1943 return -EIO; in skb_zerocopy_clone()
1951 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1967 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1973 return -EINVAL; in skb_copy_ubufs()
1976 return -EFAULT; in skb_copy_ubufs()
1989 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
1998 return -ENOMEM; in skb_copy_ubufs()
2007 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
2022 copy = min_t(u32, psize - d_off, p_len - done); in skb_copy_ubufs()
2037 for (i = 0; i < new_frags - 1; i++) { in skb_copy_ubufs()
2041 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, in skb_copy_ubufs()
2043 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
2052 * skb_clone - duplicate an sk_buff
2075 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
2076 refcount_read(&fclones->fclone_ref) == 1) { in skb_clone()
2077 n = &fclones->skb2; in skb_clone()
2078 refcount_set(&fclones->fclone_ref, 2); in skb_clone()
2079 n->fclone = SKB_FCLONE_CLONE; in skb_clone()
2088 n->fclone = SKB_FCLONE_UNAVAILABLE; in skb_clone()
2098 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
2099 skb->csum_start += off; in skb_headers_offset_update()
2100 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
2101 skb->transport_header += off; in skb_headers_offset_update()
2102 skb->network_header += off; in skb_headers_offset_update()
2104 skb->mac_header += off; in skb_headers_offset_update()
2105 skb->inner_transport_header += off; in skb_headers_offset_update()
2106 skb->inner_network_header += off; in skb_headers_offset_update()
2107 skb->inner_mac_header += off; in skb_headers_offset_update()
2115 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; in skb_copy_header()
2116 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; in skb_copy_header()
2117 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; in skb_copy_header()
2129 * skb_copy - create private copy of an sk_buff
2138 * As by-product this function converts non-linear &sk_buff to linear
2154 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy()
2158 size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2167 skb_put(n, skb->len); in skb_copy()
2169 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2177 * __pskb_copy_fclone - create copy of an sk_buff with private head.
2208 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2210 n->truesize += skb->data_len; in __pskb_copy_fclone()
2211 n->data_len = skb->data_len; in __pskb_copy_fclone()
2212 n->len = skb->len; in __pskb_copy_fclone()
2214 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2223 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2224 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2227 skb_shinfo(n)->nr_frags = i; in __pskb_copy_fclone()
2231 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2242 * pskb_expand_head - reallocate header of &sk_buff
2283 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2287 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2298 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2299 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2309 off = (data + nhead) - skb->head; in pskb_expand_head()
2311 skb->head = data; in pskb_expand_head()
2312 skb->head_frag = 0; in pskb_expand_head()
2313 skb->data += off; in pskb_expand_head()
2319 skb->tail += off; in pskb_expand_head()
2321 skb->cloned = 0; in pskb_expand_head()
2322 skb->hdr_len = 0; in pskb_expand_head()
2323 skb->nohdr = 0; in pskb_expand_head()
2324 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2328 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2332 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2333 skb->truesize += size - osize; in pskb_expand_head()
2340 return -ENOMEM; in pskb_expand_head()
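
pskb_expand_head() above copies the used part of the buffer into a larger allocation that has nhead extra bytes in front, then shifts data/tail (and the cached header offsets) by however far everything moved. A compact userspace model of that grow-the-headroom-and-re-base step is below; plain malloc/memcpy, invented names, no clone or zerocopy handling.

```c
/* Toy model of pskb_expand_head(): allocate a bigger buffer with extra
 * headroom, copy the used region, and re-base the cached pointers.
 * Illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_buf {
    unsigned char *head, *data, *tail, *end;
};

static int toy_expand_head(struct toy_buf *b, size_t nhead)
{
    size_t used     = b->tail - b->head;   /* everything up to tail is kept */
    size_t data_off = b->data - b->head;
    size_t size     = (size_t)(b->end - b->head) + nhead;
    unsigned char *head = malloc(size);

    if (!head)
        return -1;

    /* new layout: nhead fresh bytes of headroom, then the old contents */
    memcpy(head + nhead, b->head, used);
    free(b->head);

    /* re-base every cached pointer onto the new buffer, shifted by nhead */
    b->head = head;
    b->end  = head + size;
    b->data = head + nhead + data_off;
    b->tail = head + nhead + used;
    return 0;
}

int main(void)
{
    struct toy_buf b;

    b.head = malloc(32);
    b.end  = b.head + 32;
    b.data = b.head;                  /* no headroom at all */
    b.tail = b.data + 5;
    memcpy(b.data, "hello", 5);

    if (toy_expand_head(&b, 16) == 0) /* make room for a 16-byte header */
        printf("headroom now %ld, payload still \"%.5s\"\n",
               (long)(b.data - b.head), (const char *)b.data);
    free(b.head);
    return 0;
}
```
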
2349 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2365 /* Note: We plan to rework this in linux-6.4 */
2373 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2379 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2384 /* We can not change skb->end if the original or new value in __skb_unclone_keeptruesize()
2392 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", in __skb_unclone_keeptruesize()
2400 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2403 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2405 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); in __skb_unclone_keeptruesize()
2413 * skb_expand_head - reallocate header of &sk_buff
2418 * if possible; copies skb->sk to new skb as needed
2426 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2428 struct sock *sk = skb->sk; in skb_expand_head()
2451 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2452 refcount_add(delta, &sk->sk_wmem_alloc); in skb_expand_head()
2453 skb->truesize += delta; in skb_expand_head()
2464 * skb_copy_expand - copy and expand sk_buff
2495 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy_expand()
2499 n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2508 skb_put(n, skb->len); in skb_copy_expand()
2515 head_copy_off = newheadroom - head_copy_len; in skb_copy_expand()
2518 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2519 skb->len + head_copy_len)); in skb_copy_expand()
2523 skb_headers_offset_update(n, newheadroom - oldheadroom); in skb_copy_expand()
2530 * __skb_pad - zero pad the tail of an skb
2550 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2554 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2561 /* FIXME: The use of this function with non-linear skb's really needs in __skb_pad()
2568 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2579 * pskb_put - add data to the tail of a potentially fragmented buffer
2585 * fragmented buffer. @tail must be the last fragment of @skb -- or
2594 skb->data_len += len; in pskb_put()
2595 skb->len += len; in pskb_put()
2602 * skb_put - add data to a buffer
2614 skb->tail += len; in skb_put()
2615 skb->len += len; in skb_put()
2616 if (unlikely(skb->tail > skb->end)) in skb_put()
2623 * skb_push - add data to the start of a buffer
2633 skb->data -= len; in skb_push()
2634 skb->len += len; in skb_push()
2635 if (unlikely(skb->data < skb->head)) in skb_push()
2637 return skb->data; in skb_push()
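
The skb_put()/skb_push() fragments above are plain pointer arithmetic: put advances tail and len and traps if tail runs past end, while push moves data backwards into the reserved headroom and traps if it passes head (the real code calls the skb_panic() helper shown earlier). The toy below models that head/data/tail/end bookkeeping in userspace, with asserts standing in for the kernel's panic; it is an illustration, not the real sk_buff layout.

```c
/* Toy model of the skb_put()/skb_push() pointer bookkeeping:
 * one linear buffer with head <= data <= tail <= end. Illustrative only.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_buf {
    unsigned char *head, *data, *tail, *end;
    unsigned int len;
};

static void toy_init(struct toy_buf *b, size_t size, size_t headroom)
{
    b->head = malloc(size);
    b->end  = b->head + size;
    b->data = b->tail = b->head + headroom; /* reserve headroom up front */
    b->len  = 0;
}

/* append len bytes at the tail, like skb_put() */
static void *toy_put(struct toy_buf *b, unsigned int len)
{
    void *old_tail = b->tail;

    b->tail += len;
    b->len  += len;
    assert(b->tail <= b->end);  /* the kernel panics here instead */
    return old_tail;
}

/* prepend len bytes before data, like skb_push() */
static void *toy_push(struct toy_buf *b, unsigned int len)
{
    b->data -= len;
    b->len  += len;
    assert(b->data >= b->head); /* overrunning the headroom is fatal too */
    return b->data;
}

int main(void)
{
    struct toy_buf b;

    toy_init(&b, 256, 64);
    memcpy(toy_put(&b, 5), "hello", 5);  /* payload first */
    memcpy(toy_push(&b, 3), "HDR", 3);   /* then a header in the reserved headroom */
    printf("len=%u headroom=%ld tailroom=%ld\n",
           b.len, (long)(b.data - b.head), (long)(b.end - b.tail));
    free(b.head);
    return 0;
}
```
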
2642 * skb_pull - remove data from the start of a buffer
2658 * skb_pull_data - remove data from the start of a buffer returning its
2670 void *data = skb->data; in skb_pull_data()
2672 if (skb->len < len) in skb_pull_data()
2682 * skb_trim - remove end from a buffer
2692 if (skb->len > len) in skb_trim()
2705 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2718 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2725 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2728 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2738 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2739 fragp = &frag->next) { in ___pskb_trim()
2740 int end = offset + frag->len; in ___pskb_trim()
2747 return -ENOMEM; in ___pskb_trim()
2749 nfrag->next = frag->next; in ___pskb_trim()
2761 unlikely((err = pskb_trim(frag, len - offset)))) in ___pskb_trim()
2764 if (frag->next) in ___pskb_trim()
2765 skb_drop_list(&frag->next); in ___pskb_trim()
2771 skb->data_len -= skb->len - len; in ___pskb_trim()
2772 skb->len = len; in ___pskb_trim()
2774 skb->len = len; in ___pskb_trim()
2775 skb->data_len = 0; in ___pskb_trim()
2779 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2789 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2790 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2792 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2795 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2797 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2800 return -EINVAL; in pskb_trim_rcsum_slow()
2807 * __pskb_pull_tail - advance tail of skb header
2837 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2859 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2860 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2864 eat -= size; in __pskb_pull_tail()
2875 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2880 if (list->len <= eat) { in __pskb_pull_tail()
2882 eat -= list->len; in __pskb_pull_tail()
2883 list = list->next; in __pskb_pull_tail()
2887 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2889 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2892 /* Sucks! We need to fork list. :-( */ in __pskb_pull_tail()
2896 insp = list->next; in __pskb_pull_tail()
2912 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2913 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2918 clone->next = list; in __pskb_pull_tail()
2919 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2928 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2932 eat -= size; in __pskb_pull_tail()
2934 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2936 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2947 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2950 skb->tail += delta; in __pskb_pull_tail()
2951 skb->data_len -= delta; in __pskb_pull_tail()
2953 if (!skb->data_len) in __pskb_pull_tail()
2961 * skb_copy_bits - copy bits from skb to kernel buffer
2981 if (offset > (int)skb->len - len) in skb_copy_bits()
2985 if ((copy = start - offset) > 0) { in skb_copy_bits()
2989 if ((len -= copy) == 0) in skb_copy_bits()
2998 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
3000 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
3005 if ((copy = end - offset) > 0) { in skb_copy_bits()
3014 skb_frag_off(f) + offset - start, in skb_copy_bits()
3021 if ((len -= copy) == 0) in skb_copy_bits()
3034 end = start + frag_iter->len; in skb_copy_bits()
3035 if ((copy = end - offset) > 0) { in skb_copy_bits()
3038 if (skb_copy_bits(frag_iter, offset - start, to, copy)) in skb_copy_bits()
3040 if ((len -= copy) == 0) in skb_copy_bits()
3052 return -EFAULT; in skb_copy_bits()
3062 put_page(spd->pages[i]); in sock_spd_release()
3074 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); in linear_to_page()
3076 memcpy(page_address(pfrag->page) + pfrag->offset, in linear_to_page()
3078 *offset = pfrag->offset; in linear_to_page()
3079 pfrag->offset += *len; in linear_to_page()
3081 return pfrag->page; in linear_to_page()
3088 return spd->nr_pages && in spd_can_coalesce()
3089 spd->pages[spd->nr_pages - 1] == page && in spd_can_coalesce()
3090 (spd->partial[spd->nr_pages - 1].offset + in spd_can_coalesce()
3091 spd->partial[spd->nr_pages - 1].len == offset); in spd_can_coalesce()
3103 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) in spd_fill_page()
3112 spd->partial[spd->nr_pages - 1].len += *len; in spd_fill_page()
3116 spd->pages[spd->nr_pages] = page; in spd_fill_page()
3117 spd->partial[spd->nr_pages].len = *len; in spd_fill_page()
3118 spd->partial[spd->nr_pages].offset = offset; in spd_fill_page()
3119 spd->nr_pages++; in spd_fill_page()
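
spd_fill_page()/spd_can_coalesce() above either append a new (page, offset, length) entry to the splice descriptor or, if the new chunk starts exactly where the previous entry on the same page ends, simply extend that entry. The toy below models just that coalescing decision over plain buffers; the names and the span limit are invented.

```c
/* Toy model of the spd_can_coalesce()/spd_fill_page() logic: collect
 * (buffer, offset, len) spans, merging a span that directly continues the
 * previous one on the same buffer. Illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_SPANS 16

struct toy_span { const void *buf; unsigned int off, len; };

struct toy_spd {
    struct toy_span spans[TOY_MAX_SPANS];
    unsigned int nr;
};

static bool toy_can_coalesce(const struct toy_spd *spd,
                             const void *buf, unsigned int off)
{
    return spd->nr &&
           spd->spans[spd->nr - 1].buf == buf &&
           spd->spans[spd->nr - 1].off + spd->spans[spd->nr - 1].len == off;
}

static bool toy_fill(struct toy_spd *spd, const void *buf,
                     unsigned int off, unsigned int len)
{
    if (toy_can_coalesce(spd, buf, off)) {
        spd->spans[spd->nr - 1].len += len;  /* just grow the last span */
        return true;
    }
    if (spd->nr == TOY_MAX_SPANS)
        return false;                        /* descriptor is full */
    spd->spans[spd->nr++] = (struct toy_span){ buf, off, len };
    return true;
}

int main(void)
{
    static char page_a[4096], page_b[4096];
    struct toy_spd spd = { .nr = 0 };

    toy_fill(&spd, page_a, 0, 100);
    toy_fill(&spd, page_a, 100, 50);  /* contiguous: coalesced */
    toy_fill(&spd, page_b, 0, 200);   /* different buffer: new span */

    for (unsigned int i = 0; i < spd.nr; i++)
        printf("span %u: off=%u len=%u\n", i,
               spd.spans[i].off, spd.spans[i].len);
    return 0;
}
```
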
3136 *off -= plen; in __splice_segment()
3142 plen -= *off; in __splice_segment()
3152 plen -= flen; in __splice_segment()
3153 *len -= flen; in __splice_segment()
3171 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3175 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3176 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3189 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3190 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3202 if (*offset >= iter->len) { in __skb_splice_bits()
3203 *offset -= iter->len; in __skb_splice_bits()
3247 struct socket *sock = sk->sk_socket; in sendmsg_locked()
3251 return -EINVAL; in sendmsg_locked()
3253 if (!sock->ops->sendmsg_locked) in sendmsg_locked()
3256 return sock->ops->sendmsg_locked(sk, msg, size); in sendmsg_locked()
3261 struct socket *sock = sk->sk_socket; in sendmsg_unlocked()
3264 return -EINVAL; in sendmsg_unlocked()
3284 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3285 kv.iov_base = skb->data + offset; in __skb_send_sock()
3297 len -= ret; in __skb_send_sock()
3305 offset -= skb_headlen(skb); in __skb_send_sock()
3308 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3309 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3314 offset -= skb_frag_size(frag); in __skb_send_sock()
3317 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3318 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3320 slen = min_t(size_t, len, skb_frag_size(frag) - offset); in __skb_send_sock()
3338 len -= ret; in __skb_send_sock()
3340 slen -= ret; in __skb_send_sock()
3351 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3354 } else if (skb->next) { in __skb_send_sock()
3355 skb = skb->next; in __skb_send_sock()
3361 return orig_len - len; in __skb_send_sock()
3364 return orig_len == len ? ret : orig_len - len; in __skb_send_sock()
3382 * skb_store_bits - store bits from kernel buffer to skb
3399 if (offset > (int)skb->len - len) in skb_store_bits()
3402 if ((copy = start - offset) > 0) { in skb_store_bits()
3406 if ((len -= copy) == 0) in skb_store_bits()
3415 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3416 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3422 if ((copy = end - offset) > 0) { in skb_store_bits()
3431 skb_frag_off(frag) + offset - start, in skb_store_bits()
3438 if ((len -= copy) == 0) in skb_store_bits()
3451 end = start + frag_iter->len; in skb_store_bits()
3452 if ((copy = end - offset) > 0) { in skb_store_bits()
3455 if (skb_store_bits(frag_iter, offset - start, in skb_store_bits()
3458 if ((len -= copy) == 0) in skb_store_bits()
3469 return -EFAULT; in skb_store_bits()
3478 int i, copy = start - offset; in __skb_checksum()
3486 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, in __skb_checksum()
3487 skb->data + offset, copy, csum); in __skb_checksum()
3488 if ((len -= copy) == 0) in __skb_checksum()
3497 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
3499 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
3504 if ((copy = end - offset) > 0) { in __skb_checksum()
3514 skb_frag_off(frag) + offset - start, in __skb_checksum()
3517 csum2 = INDIRECT_CALL_1(ops->update, in __skb_checksum()
3521 csum = INDIRECT_CALL_1(ops->combine, in __skb_checksum()
3527 if (!(len -= copy)) in __skb_checksum()
3539 end = start + frag_iter->len; in __skb_checksum()
3540 if ((copy = end - offset) > 0) { in __skb_checksum()
3544 csum2 = __skb_checksum(frag_iter, offset - start, in __skb_checksum()
3546 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, in __skb_checksum()
3548 if ((len -= copy) == 0) in __skb_checksum()
3579 int i, copy = start - offset; in skb_copy_and_csum_bits()
3588 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3590 if ((len -= copy) == 0) in skb_copy_and_csum_bits()
3600 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3605 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3606 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3607 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3617 skb_frag_off(frag) + offset - start, in skb_copy_and_csum_bits()
3628 if (!(len -= copy)) in skb_copy_and_csum_bits()
3642 end = start + frag_iter->len; in skb_copy_and_csum_bits()
3643 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3647 offset - start, in skb_copy_and_csum_bits()
3650 if ((len -= copy) == 0) in skb_copy_and_csum_bits()
3667 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3670 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3671 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3672 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3675 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3680 /* This function assumes skb->csum already holds pseudo header's checksum,
3682 * __skb_checksum_validate_complete(). And, the original skb->csum must
3685 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3686 * zero. The new checksum is stored back into skb->csum unless the skb is
3694 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3696 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3699 * re-computed checksum is valid instead, then we have a mismatch in __skb_checksum_complete()
3700 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3701 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3702 * when moving skb->data around. in __skb_checksum_complete()
3705 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3706 !skb->csum_complete_sw) in __skb_checksum_complete()
3707 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3712 skb->csum = csum; in __skb_checksum_complete()
3713 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3714 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3715 skb->csum_valid = !sum; in __skb_checksum_complete()
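
__skb_checksum_complete() above recomputes the one's-complement sum over the packet, adds it to the pseudo-header sum already stored in skb->csum, and folds the result to 16 bits; a fold of zero means the checksum verifies (skb->csum_valid = !sum). The standalone demo below exercises exactly that add-and-fold arithmetic on a plain byte array; csum_partial/csum_fold here are small local reimplementations written for the demo, not the kernel helpers.

```c
/* One's-complement checksum arithmetic in the style used by
 * __skb_checksum_complete(): sum the data, add the pseudo-header sum,
 * fold to 16 bits; a receiver-side fold of 0 means "valid".
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
    while (len > 1) {
        sum += (uint32_t)buf[0] << 8 | buf[1];  /* big-endian 16-bit words */
        buf += 2;
        len -= 2;
    }
    if (len)                                    /* odd trailing byte */
        sum += (uint32_t)buf[0] << 8;
    return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)                           /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t pkt[32];
    uint32_t pseudo, data_sum;
    uint16_t csum_field, check;

    memset(pkt, 0xab, sizeof(pkt));

    /* sender: checksum = ~fold(pseudo + data), with the csum field zeroed */
    pseudo     = 0x1234;    /* stand-in for a pseudo-header sum */
    csum_field = csum_fold(csum_partial(pkt, sizeof(pkt), pseudo));

    /* receiver: add the transmitted checksum back in; a fold of 0 is valid */
    data_sum = csum_partial(pkt, sizeof(pkt), pseudo) + csum_field;
    check    = csum_fold(data_sum);

    printf("checksum field 0x%04x, receiver fold 0x%04x (%s)\n",
           csum_field, check, check == 0 ? "valid" : "INVALID");
    return 0;
}
```
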
3749 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3760 if (!from->head_frag || in skb_zerocopy_headlen()
3762 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { in skb_zerocopy_headlen()
3765 hlen = from->len; in skb_zerocopy_headlen()
3769 hlen = from->len; in skb_zerocopy_headlen()
3776 * skb_zerocopy - Zero copy skb to skb
3790 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3791 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3797 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3802 BUG_ON(!from->head_frag && !hlen); in skb_zerocopy()
3812 len -= hlen; in skb_zerocopy()
3816 page = virt_to_head_page(from->head); in skb_zerocopy()
3817 offset = from->data - (unsigned char *)page_address(page); in skb_zerocopy()
3822 len -= plen; in skb_zerocopy()
3830 return -ENOMEM; in skb_zerocopy()
3834 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { in skb_zerocopy()
3839 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; in skb_zerocopy()
3840 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), in skb_zerocopy()
3842 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); in skb_zerocopy()
3843 len -= size; in skb_zerocopy()
3847 skb_shinfo(to)->nr_frags = j; in skb_zerocopy()
3858 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3868 if (csstart != skb->len) in skb_copy_and_csum_dev()
3870 skb->len - csstart); in skb_copy_and_csum_dev()
3872 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3873 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3881 * skb_dequeue - remove from the head of the queue
3894 spin_lock_irqsave(&list->lock, flags); in skb_dequeue()
3896 spin_unlock_irqrestore(&list->lock, flags); in skb_dequeue()
3902 * skb_dequeue_tail - remove from the tail of the queue
3914 spin_lock_irqsave(&list->lock, flags); in skb_dequeue_tail()
3916 spin_unlock_irqrestore(&list->lock, flags); in skb_dequeue_tail()
3922 * skb_queue_purge_reason - empty a list
3941 spin_lock_irqsave(&list->lock, flags); in skb_queue_purge_reason()
3943 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_purge_reason()
3950 * skb_rbtree_purge - empty a skb rbtree
3957 * out-of-order queue is protected by the socket lock).
3968 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3969 sum += skb->truesize; in skb_rbtree_purge()
3983 spin_lock_irqsave(&list->lock, flags); in skb_errqueue_purge()
3985 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
3986 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
3991 spin_unlock_irqrestore(&list->lock, flags); in skb_errqueue_purge()
3997 * skb_queue_head - queue a buffer at the list head
4011 spin_lock_irqsave(&list->lock, flags); in skb_queue_head()
4013 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_head()
4018 * skb_queue_tail - queue a buffer at the list tail
4032 spin_lock_irqsave(&list->lock, flags); in skb_queue_tail()
4034 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_tail()
4039 * skb_unlink - remove a buffer from a list
4052 spin_lock_irqsave(&list->lock, flags); in skb_unlink()
4054 spin_unlock_irqrestore(&list->lock, flags); in skb_unlink()
4059 * skb_append - append a buffer
4072 spin_lock_irqsave(&list->lock, flags); in skb_append()
4074 spin_unlock_irqrestore(&list->lock, flags); in skb_append()
4084 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
4085 pos - len); in skb_split_inside_header()
4087 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
4088 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
4090 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
4091 skb1->unreadable = skb->unreadable; in skb_split_inside_header()
4092 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
4093 skb1->data_len = skb->data_len; in skb_split_inside_header()
4094 skb1->len += skb1->data_len; in skb_split_inside_header()
4095 skb->data_len = 0; in skb_split_inside_header()
4096 skb->len = len; in skb_split_inside_header()
4105 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
4107 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
4108 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
4109 skb->len = len; in skb_split_no_header()
4110 skb->data_len = len - pos; in skb_split_no_header()
4113 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
4116 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
4128 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); in skb_split_no_header()
4129 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); in skb_split_no_header()
4130 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
4131 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4135 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4138 skb_shinfo(skb1)->nr_frags = k; in skb_split_no_header()
4140 skb1->unreadable = skb->unreadable; in skb_split_no_header()
4144 * skb_split - Split fragmented skb to two parts at length len.
4156 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
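
skb_split_no_header() above walks the fragment array: fragments wholly before the split point stay with the original skb, fragments past it move to skb1, and the fragment straddling the boundary is cut in two by trimming its size on one side and advancing its offset by len - pos on the other. The toy below replays that arithmetic over an array of (offset, size) pairs; it models the bookkeeping only.

```c
/* Toy model of skb_split_no_header(): split an array of fragment sizes at a
 * byte offset 'len', possibly cutting one fragment in two. Illustration only.
 */
#include <stdio.h>

#define TOY_MAX_FRAGS 8

struct toy_frag { int off, size; };

static void toy_split(struct toy_frag *a, int *na,
                      struct toy_frag *b, int *nb, int len)
{
    int i, k = 0, pos = 0, nfrags = *na;

    *na = 0;
    for (i = 0; i < nfrags; i++) {
        int size = a[i].size;

        if (pos + size > len) {
            b[k] = a[i];                    /* this frag reaches past the split */
            if (pos < len) {
                /* cut it: left side keeps len - pos bytes ... */
                a[i].size = len - pos;
                a[(*na)++] = a[i];
                /* ... right side starts len - pos bytes further in */
                b[k].off  += len - pos;
                b[k].size -= len - pos;
            }
            k++;
        } else {
            a[(*na)++] = a[i];              /* wholly before the split */
        }
        pos += size;
    }
    *nb = k;
}

int main(void)
{
    struct toy_frag a[TOY_MAX_FRAGS] = { {0, 100}, {0, 300}, {0, 200} };
    struct toy_frag b[TOY_MAX_FRAGS];
    int na = 3, nb, i;

    toy_split(a, &na, b, &nb, 250);  /* split 600 bytes at offset 250 */

    printf("left :");
    for (i = 0; i < na; i++)
        printf(" %d@%d", a[i].size, a[i].off);
    printf("\nright:");
    for (i = 0; i < nb; i++)
        printf(" %d@%d", b[i].size, b[i].off);
    printf("\n");
    return 0;
}
```
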
4165 /* Shifting from/to a cloned skb is a no-go.
4175 * skb_shift - Shifts paged data partially from skb to another
4187 * to have non-paged data as well.
4190 * specialized skb free'er to handle frags without up-to-date nr_frags.
4197 BUG_ON(shiftlen > skb->len); in skb_shift()
4204 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); in skb_shift()
4209 to = skb_shinfo(tgt)->nr_frags; in skb_shift()
4210 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4217 merge = -1; in skb_shift()
4219 merge = to - 1; in skb_shift()
4221 todo -= skb_frag_size(fragfrom); in skb_shift()
4228 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4229 fragto = &skb_shinfo(tgt)->frags[merge]; in skb_shift()
4241 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4242 if ((shiftlen == skb->len) && in skb_shift()
4243 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4249 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4253 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4254 fragto = &skb_shinfo(tgt)->frags[to]; in skb_shift()
4258 todo -= skb_frag_size(fragfrom); in skb_shift()
4278 skb_shinfo(tgt)->nr_frags = to; in skb_shift()
4281 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4282 fragto = &skb_shinfo(tgt)->frags[merge]; in skb_shift()
4285 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4290 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4291 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4292 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4294 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4300 tgt->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4301 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4303 skb_len_add(skb, -shiftlen); in skb_shift()
4310 * skb_prepare_seq_read - Prepare a sequential read of skb data
4322 st->lower_offset = from; in skb_prepare_seq_read()
4323 st->upper_offset = to; in skb_prepare_seq_read()
4324 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4325 st->frag_idx = st->stepped_offset = 0; in skb_prepare_seq_read()
4326 st->frag_data = NULL; in skb_prepare_seq_read()
4327 st->frag_off = 0; in skb_prepare_seq_read()
4332 * skb_seq_read - Sequentially read skb data
4353 * at the moment, state->root_skb could be replaced with
4359 unsigned int block_limit, abs_offset = consumed + st->lower_offset; in skb_seq_read()
4362 if (unlikely(abs_offset >= st->upper_offset)) { in skb_seq_read()
4363 if (st->frag_data) { in skb_seq_read()
4364 kunmap_atomic(st->frag_data); in skb_seq_read()
4365 st->frag_data = NULL; in skb_seq_read()
4371 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; in skb_seq_read()
4373 if (abs_offset < block_limit && !st->frag_data) { in skb_seq_read()
4374 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); in skb_seq_read()
4375 return block_limit - abs_offset; in skb_seq_read()
4378 if (!skb_frags_readable(st->cur_skb)) in skb_seq_read()
4381 if (st->frag_idx == 0 && !st->frag_data) in skb_seq_read()
4382 st->stepped_offset += skb_headlen(st->cur_skb); in skb_seq_read()
4384 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { in skb_seq_read()
4387 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; in skb_seq_read()
4394 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; in skb_seq_read()
4395 pg_off = offset_in_page(pg_off + st->frag_off); in skb_seq_read()
4396 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, in skb_seq_read()
4397 PAGE_SIZE - pg_off); in skb_seq_read()
4400 block_limit = pg_sz + st->stepped_offset; in skb_seq_read()
4402 if (!st->frag_data) in skb_seq_read()
4403 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); in skb_seq_read()
4405 *data = (u8 *)st->frag_data + pg_off + in skb_seq_read()
4406 (abs_offset - st->stepped_offset); in skb_seq_read()
4408 return block_limit - abs_offset; in skb_seq_read()
4411 if (st->frag_data) { in skb_seq_read()
4412 kunmap_atomic(st->frag_data); in skb_seq_read()
4413 st->frag_data = NULL; in skb_seq_read()
4416 st->stepped_offset += pg_sz; in skb_seq_read()
4417 st->frag_off += pg_sz; in skb_seq_read()
4418 if (st->frag_off == skb_frag_size(frag)) { in skb_seq_read()
4419 st->frag_off = 0; in skb_seq_read()
4420 st->frag_idx++; in skb_seq_read()
4424 if (st->frag_data) { in skb_seq_read()
4425 kunmap_atomic(st->frag_data); in skb_seq_read()
4426 st->frag_data = NULL; in skb_seq_read()
4429 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { in skb_seq_read()
4430 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; in skb_seq_read()
4431 st->frag_idx = 0; in skb_seq_read()
4433 } else if (st->cur_skb->next) { in skb_seq_read()
4434 st->cur_skb = st->cur_skb->next; in skb_seq_read()
4435 st->frag_idx = 0; in skb_seq_read()
4444 * skb_abort_seq_read - Abort a sequential read of skb data
4452 if (st->frag_data) in skb_abort_seq_read()
4453 kunmap_atomic(st->frag_data); in skb_abort_seq_read()
4458 * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
4469 * Return: 0 on success or -EINVAL if the copy ended early
4479 return -EINVAL; in skb_copy_seq_read()
4487 len -= sqlen; in skb_copy_seq_read()
4492 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
4507 * skb_find_text - Find a text pattern in skb data
4521 unsigned int patlen = config->ops->get_pattern_len(config); in skb_find_text()
4527 config->get_next_block = skb_ts_get_next_block; in skb_find_text()
4528 config->finish = skb_ts_finish; in skb_find_text()
4533 return (ret + patlen <= to - from ? ret : UINT_MAX); in skb_find_text()
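
skb_prepare_seq_read()/skb_seq_read() above expose a fragmented buffer as a sequence of contiguous chunks: each call hands back a pointer into the next readable run, the linear head first and then each fragment, together with its length, which is what lets skb_find_text() scan without linearizing. Below is a userspace model of that iterator over a "head plus fragment array" layout; it skips the kmap and frag_list machinery entirely.

```c
/* Toy model of the skb_seq_read() idea: iterate a buffer stored as a linear
 * head plus an array of fragments, returning one contiguous chunk per call.
 * Illustration only.
 */
#include <stdio.h>

struct toy_frag { const char *data; unsigned int len; };

struct toy_seq_state {
    const char *head;
    unsigned int headlen;
    const struct toy_frag *frags;
    unsigned int nr_frags;
    unsigned int consumed;   /* bytes handed out so far */
};

/* return the length of the next contiguous chunk, or 0 when done */
static unsigned int toy_seq_read(struct toy_seq_state *st, const char **data)
{
    unsigned int off = st->consumed;

    if (off < st->headlen) {                 /* still inside the linear head */
        *data = st->head + off;
        st->consumed = st->headlen;
        return st->headlen - off;
    }
    off -= st->headlen;
    for (unsigned int i = 0; i < st->nr_frags; i++) {
        if (off < st->frags[i].len) {        /* chunk comes from this frag */
            *data = st->frags[i].data + off;
            st->consumed += st->frags[i].len - off;
            return st->frags[i].len - off;
        }
        off -= st->frags[i].len;
    }
    return 0;                                /* everything consumed */
}

int main(void)
{
    struct toy_frag frags[] = { { "fragmented ", 11 }, { "payload", 7 } };
    struct toy_seq_state st = {
        .head = "linear head + ", .headlen = 14,
        .frags = frags, .nr_frags = 2, .consumed = 0,
    };
    const char *p;
    unsigned int n;

    while ((n = toy_seq_read(&st, &p)) > 0)
        printf("chunk of %2u bytes: \"%.*s\"\n", n, (int)n, p);
    return 0;
}
```
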
4540 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4543 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4549 return -EMSGSIZE; in skb_append_pagefrags()
4557 * skb_pull_rcsum - pull skb and update receive checksum
4569 unsigned char *data = skb->data; in skb_pull_rcsum()
4571 BUG_ON(len > skb->len); in skb_pull_rcsum()
4574 return skb->data; in skb_pull_rcsum()
4583 page = virt_to_head_page(frag_skb->head); in skb_head_frag_to_page_desc()
4584 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - in skb_head_frag_to_page_desc()
4594 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4602 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4609 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4613 list_skb = list_skb->next; in skb_segment_list()
4616 delta_truesize += nskb->truesize; in skb_segment_list()
4624 err = -ENOMEM; in skb_segment_list()
4629 skb->next = nskb; in skb_segment_list()
4631 tail->next = nskb; in skb_segment_list()
4634 nskb->next = list_skb; in skb_segment_list()
4640 delta_len += nskb->len; in skb_segment_list()
4642 skb_push(nskb, -skb_network_offset(nskb) + offset); in skb_segment_list()
4645 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4648 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4649 nskb->transport_header += len_diff; in skb_segment_list()
4650 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4651 nskb->data - tnl_hlen, in skb_segment_list()
4659 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4660 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4661 skb->len = skb->len - delta_len; in skb_segment_list()
4665 skb->prev = tail; in skb_segment_list()
4676 kfree_skb_list(skb->next); in skb_segment_list()
4677 skb->next = NULL; in skb_segment_list()
4678 return ERR_PTR(-ENOMEM); in skb_segment_list()
4683 * skb_segment - Perform protocol segmentation on skb.
4685 * @features: features for the output path (see dev->features)
4696 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; in skb_segment()
4697 unsigned int mss = skb_shinfo(head_skb)->gso_size; in skb_segment() local
4698 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); in skb_segment()
4703 unsigned int len = head_skb->len; in skb_segment()
4708 int err = -ENOMEM; in skb_segment()
4712 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && in skb_segment()
4713 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { in skb_segment()
4716 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { in skb_segment()
4717 if (skb_headlen(check_skb) && !check_skb->head_frag) { in skb_segment()
4736 return ERR_PTR(-EINVAL); in skb_segment()
4741 if (sg && csum && (mss != GSO_BY_FRAGS)) { in skb_segment()
4747 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) in skb_segment()
4758 frag_len = list_skb->len; in skb_segment()
4760 if (frag_len != iter->len && iter->next) in skb_segment()
4762 if (skb_headlen(iter) && !iter->head_frag) in skb_segment()
4765 len -= iter->len; in skb_segment()
4773 * doesn't fit into an MSS sized block, so take care of that in skb_segment()
4777 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; in skb_segment()
4779 mss *= partial_segs; in skb_segment()
4789 return ERR_PTR(-ENOMEM); in skb_segment()
4791 nfrags = skb_shinfo(head_skb)->nr_frags; in skb_segment()
4792 frag = skb_shinfo(head_skb)->frags; in skb_segment()
4801 if (unlikely(mss == GSO_BY_FRAGS)) { in skb_segment()
4802 len = list_skb->len; in skb_segment()
4804 len = head_skb->len - offset; in skb_segment()
4805 if (len > mss) in skb_segment()
4806 len = mss; in skb_segment()
4809 hsize = skb_headlen(head_skb) - offset; in skb_segment()
4820 nfrags = skb_shinfo(list_skb)->nr_frags; in skb_segment()
4821 frag = skb_shinfo(list_skb)->frags; in skb_segment()
4837 list_skb = list_skb->next; in skb_segment()
4850 nskb->truesize += skb_end_offset(nskb) - hsize; in skb_segment()
4871 tail->next = nskb; in skb_segment()
4878 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); in skb_segment()
4881 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, in skb_segment()
4882 nskb->data - tnl_hlen, in skb_segment()
4885 if (nskb->len == len + doffset) in skb_segment()
4890 if (!nskb->remcsum_offload) in skb_segment()
4891 nskb->ip_summed = CHECKSUM_NONE; in skb_segment()
4892 SKB_GSO_CB(nskb)->csum = in skb_segment()
4897 SKB_GSO_CB(nskb)->csum_start = in skb_segment()
4906 nskb_frag = skb_shinfo(nskb)->frags; in skb_segment()
4911 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & in skb_segment()
4925 nfrags = skb_shinfo(list_skb)->nr_frags; in skb_segment()
4926 frag = skb_shinfo(list_skb)->frags; in skb_segment()
4931 BUG_ON(!list_skb->head_frag); in skb_segment()
4934 i--; in skb_segment()
4935 frag--; in skb_segment()
4938 list_skb = list_skb->next; in skb_segment()
4941 if (unlikely(skb_shinfo(nskb)->nr_frags >= in skb_segment()
4945 pos, mss); in skb_segment()
4946 err = -EINVAL; in skb_segment()
4955 skb_frag_off_add(nskb_frag, offset - pos); in skb_segment()
4956 skb_frag_size_sub(nskb_frag, offset - pos); in skb_segment()
4959 skb_shinfo(nskb)->nr_frags++; in skb_segment()
4966 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); in skb_segment()
4974 nskb->data_len = len - hsize; in skb_segment()
4975 nskb->len += nskb->data_len; in skb_segment()
4976 nskb->truesize += nskb->data_len; in skb_segment()
4984 if (!nskb->remcsum_offload) in skb_segment()
4985 nskb->ip_summed = CHECKSUM_NONE; in skb_segment()
4986 SKB_GSO_CB(nskb)->csum = in skb_segment()
4988 nskb->len - doffset, 0); in skb_segment()
4989 SKB_GSO_CB(nskb)->csum_start = in skb_segment()
4992 } while ((offset += len) < head_skb->len); in skb_segment()
4995 * Put it in segs->prev to avoid walking the list. in skb_segment()
4998 segs->prev = tail; in skb_segment()
5002 int type = skb_shinfo(head_skb)->gso_type; in skb_segment()
5003 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; in skb_segment()
5012 for (iter = segs; iter; iter = iter->next) { in skb_segment()
5013 skb_shinfo(iter)->gso_size = gso_size; in skb_segment()
5014 skb_shinfo(iter)->gso_segs = partial_segs; in skb_segment()
5015 skb_shinfo(iter)->gso_type = type; in skb_segment()
5016 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; in skb_segment()
5019 if (tail->len - doffset <= gso_size) in skb_segment()
5020 skb_shinfo(tail)->gso_size = 0; in skb_segment()
5022 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); in skb_segment()
5029 if (head_skb->destructor == sock_wfree) { in skb_segment()
5030 swap(tail->truesize, head_skb->truesize); in skb_segment()
5031 swap(tail->destructor, head_skb->destructor); in skb_segment()
5032 swap(tail->sk, head_skb->sk); in skb_segment()
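
In skb_segment() above, mss comes from gso_size and the payload past the headers is cut into mss-sized pieces; the partial-coalescing path first scales mss by partial_segs so the main loop emits fewer, larger super-segments, with gso_size/gso_segs fixed up on each output skb afterwards. The sketch below shows only that length arithmetic: no headers, no frag lists, and the GSO_BY_FRAGS clamp on partial_segs is ignored.

```c
/* Toy model of the skb_segment() length arithmetic: slice 'len' payload bytes
 * into mss-sized segments and report each segment's size. Illustration only.
 */
#include <stdio.h>

static void toy_segment(unsigned int len, unsigned int mss)
{
    unsigned int offset = 0, seg = 0;

    while (offset < len) {
        unsigned int this_len = len - offset;

        if (this_len > mss)
            this_len = mss;     /* full-sized segment */
        printf("segment %u: %u bytes\n", seg++, this_len);
        offset += this_len;
    }
    printf("-> %u segments for len=%u, mss=%u\n", seg, len, mss);
}

int main(void)
{
    unsigned int len = 4300, mss = 1448;
    unsigned int partial_segs = len / mss;   /* how many full MSS fit: 2 */

    toy_segment(len, mss);                   /* 1448 + 1448 + 1404 */

    /* the coalescing pass bundles several MSS into one super-segment,
     * mirroring "mss *= partial_segs" (GSO_BY_FRAGS clamp ignored here) */
    if (partial_segs > 1)
        toy_segment(len, mss * partial_segs); /* 2896 + 1404 */
    return 0;
}
```
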
5098 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
5118 * struct skb_shared_info is located at the end of skb->head, in skb_init()
5136 int i, copy = start - offset; in __skb_to_sgvec()
5141 return -EMSGSIZE; in __skb_to_sgvec()
5146 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5148 if ((len -= copy) == 0) in __skb_to_sgvec()
5153 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
5158 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
5159 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
5160 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
5161 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) in __skb_to_sgvec()
5162 return -EMSGSIZE; in __skb_to_sgvec()
5167 skb_frag_off(frag) + offset - start); in __skb_to_sgvec()
5169 if (!(len -= copy)) in __skb_to_sgvec()
5181 end = start + frag_iter->len; in __skb_to_sgvec()
5182 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
5183 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) in __skb_to_sgvec()
5184 return -EMSGSIZE; in __skb_to_sgvec()
5188 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, in __skb_to_sgvec()
5193 if ((len -= copy) == 0) in __skb_to_sgvec()
5204 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
5206 * @sg: The scatter-gather list to map into
5210 * Fill the specified scatter-gather list with mappings/pointers into a
5212 * the number of scatterlist items used, or -EMSGSIZE if the contents
5222 sg_mark_end(&sg[nsg - 1]); in skb_to_sgvec()
5257 * skb_cow_data - Check that a socket buffer's data buffers are writable
5283 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5285 return -ENOMEM; in skb_cow_data()
5295 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5296 return -ENOMEM; in skb_cow_data()
5306 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5321 if (skb1->next == NULL && tailbits) { in skb_cow_data()
5322 if (skb_shinfo(skb1)->nr_frags || in skb_cow_data()
5331 skb_shinfo(skb1)->nr_frags || in skb_cow_data()
5344 return -ENOMEM; in skb_cow_data()
5346 if (skb1->sk) in skb_cow_data()
5347 skb_set_owner_w(skb2, skb1->sk); in skb_cow_data()
5352 skb2->next = skb1->next; in skb_cow_data()
5359 skb_p = &skb1->next; in skb_cow_data()
5368 struct sock *sk = skb->sk; in sock_rmem_free()
5370 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5378 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5387 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5388 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) in sock_queue_err_skb()
5389 return -ENOMEM; in sock_queue_err_skb()
5392 skb->sk = sk; in sock_queue_err_skb()
5393 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5394 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5400 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5409 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5410 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5415 struct sk_buff_head *q = &sk->sk_error_queue; in sock_dequeue_err_skb()
5423 spin_lock_irqsave(&q->lock, flags); in sock_dequeue_err_skb()
5428 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; in sock_dequeue_err_skb()
5430 spin_unlock_irqrestore(&q->lock, flags); in sock_dequeue_err_skb()
5433 sk->sk_err = 0; in sock_dequeue_err_skb()
5443 * skb_clone_sk - create clone of skb, and take reference to socket
5457 struct sock *sk = skb->sk; in skb_clone_sk()
5460 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) in skb_clone_sk()
5469 clone->sk = sk; in skb_clone_sk()
5470 clone->destructor = sock_efree; in skb_clone_sk()
5484 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5488 serr->ee.ee_errno = ENOMSG; in __skb_complete_tx_timestamp()
5489 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; in __skb_complete_tx_timestamp()
5490 serr->ee.ee_info = tstype; in __skb_complete_tx_timestamp()
5491 serr->opt_stats = opt_stats; in __skb_complete_tx_timestamp()
5492 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5493 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { in __skb_complete_tx_timestamp()
5494 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5496 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); in __skb_complete_tx_timestamp()
5512 read_lock_bh(&sk->sk_callback_lock); in skb_may_tx_timestamp()
5513 ret = sk->sk_socket && sk->sk_socket->file && in skb_may_tx_timestamp()
5514 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); in skb_may_tx_timestamp()
5515 read_unlock_bh(&sk->sk_callback_lock); in skb_may_tx_timestamp()
5522 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5530 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { in skb_complete_tx_timestamp()
5554 tsflags = READ_ONCE(sk->sk_tsflags); in __skb_tstamp_tx()
5556 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) in __skb_tstamp_tx()
5585 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5587 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5602 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, in skb_tstamp_tx()
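
Driver-side sketch of the TX timestamp hand-off shown above: once the NIC reports a hardware timestamp for a packet that had hardware timestamping requested, the completion path converts it and calls skb_tstamp_tx(). The helper name and the nanosecond source are hypothetical.

        #include <linux/skbuff.h>
        #include <linux/ktime.h>

        static void drv_report_tx_tstamp(struct sk_buff *skb, u64 hw_ns)
        {
                struct skb_hwtstamps hwtstamps = {
                        .hwtstamp = ns_to_ktime(hw_ns),
                };

                /* Clones @skb onto the owning socket's error queue together
                 * with the hardware timestamp.
                 */
                skb_tstamp_tx(skb, &hwtstamps);
        }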
5610 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5614 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5615 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5619 serr->ee.ee_errno = ENOMSG; in skb_complete_wifi_ack()
5620 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; in skb_complete_wifi_ack()
5625 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { in skb_complete_wifi_ack()
5636 * skb_partial_csum_set - set up and verify partial csum values for packet
5638 * @start: the number of bytes after skb->data to start checksumming.
5641 * For untrusted partially-checksummed packets, we need to make sure the values
5642 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5644 * This function checks and sets those values and skb->ip_summed: if this
5657 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5658 skb->csum_start = csum_start; in skb_partial_csum_set()
5659 skb->csum_offset = off; in skb_partial_csum_set()
5660 skb->transport_header = csum_start; in skb_partial_csum_set()
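
Caller sketch in the spirit of the virtio/tun header parsing: csum_start and csum_offset come from an untrusted header, so they must pass through skb_partial_csum_set() before the skb enters the stack. The wrapper is illustrative.

        #include <linux/skbuff.h>
        #include <linux/errno.h>

        static int apply_untrusted_csum_hint(struct sk_buff *skb,
                                             u16 csum_start, u16 csum_offset)
        {
                /* Validates the offsets, sets CHECKSUM_PARTIAL and points the
                 * transport header at csum_start.
                 */
                if (!skb_partial_csum_set(skb, csum_start, csum_offset))
                        return -EINVAL;

                return 0;
        }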
5674 if (max > skb->len) in skb_maybe_pull_tail()
5675 max = skb->len; in skb_maybe_pull_tail()
5677 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5678 return -ENOMEM; in skb_maybe_pull_tail()
5681 return -EPROTO; in skb_maybe_pull_tail()
5701 err = -EPROTO; in skb_checksum_setup_ip()
5702 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5710 err = -EPROTO; in skb_checksum_setup_ip()
5711 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5714 return ERR_PTR(-EPROTO); in skb_checksum_setup_ip()
5742 err = -EPROTO; in skb_checksum_setup_ipv4()
5747 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5752 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5753 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5754 skb->len - off, in skb_checksum_setup_ipv4()
5755 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5789 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5791 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5807 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5822 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5838 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) in skb_checksum_setup_ipv6()
5841 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5851 err = -EPROTO; in skb_checksum_setup_ipv6()
5861 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5862 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5863 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5871 * skb_checksum_setup - set up partial checksum offset
5873 * @recalculate: if true the pseudo-header checksum will be recalculated
5879 switch (skb->protocol) { in skb_checksum_setup()
5889 err = -EPROTO; in skb_checksum_setup()
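
Typical backend-driver usage (xen-netback style), sketched: packets from an untrusted peer get their partial-checksum state validated, optionally recomputing the pseudo-header checksum. The wrapper and its drop-on-failure policy are illustrative.

        #include <linux/skbuff.h>

        static int fixup_peer_checksum(struct sk_buff *skb, bool csum_blank)
        {
                int err;

                /* recalculate == true also rewrites the pseudo-header checksum */
                err = skb_checksum_setup(skb, csum_blank);
                if (err) {
                        kfree_skb(skb);         /* -EPROTO or -ENOMEM */
                        return err;
                }

                return 0;
        }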
5898 * skb_checksum_maybe_trim - maybe trims the given skb
5905 * (e.g. transport_len exceeds skb length or out-of-memory).
5917 if (skb->len < len) in skb_checksum_maybe_trim()
5919 else if (skb->len == len) in skb_checksum_maybe_trim()
5936 * skb_checksum_trimmed - validate checksum of an skb
5986 skb->dev->name); in __skb_warn_lro_forwarding()
6002 * skb_try_coalesce - try to merge skb to prior one
6012 int i, delta, len = from->len; in skb_try_coalesce()
6019 /* In general, avoid mixing page_pool and non-page_pool allocated in skb_try_coalesce()
6021 * references if @from is cloned and !@to->pp_recycle but its in skb_try_coalesce()
6025 if (to->pp_recycle != from->pp_recycle) in skb_try_coalesce()
6040 if (to_shinfo->frag_list || from_shinfo->frag_list) in skb_try_coalesce()
6049 if (to_shinfo->nr_frags + in skb_try_coalesce()
6050 from_shinfo->nr_frags >= MAX_SKB_FRAGS) in skb_try_coalesce()
6056 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_try_coalesce()
6058 page = virt_to_head_page(from->head); in skb_try_coalesce()
6059 offset = from->data - (unsigned char *)page_address(page); in skb_try_coalesce()
6061 skb_fill_page_desc(to, to_shinfo->nr_frags, in skb_try_coalesce()
6065 if (to_shinfo->nr_frags + in skb_try_coalesce()
6066 from_shinfo->nr_frags > MAX_SKB_FRAGS) in skb_try_coalesce()
6069 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); in skb_try_coalesce()
6074 memcpy(to_shinfo->frags + to_shinfo->nr_frags, in skb_try_coalesce()
6075 from_shinfo->frags, in skb_try_coalesce()
6076 from_shinfo->nr_frags * sizeof(skb_frag_t)); in skb_try_coalesce()
6077 to_shinfo->nr_frags += from_shinfo->nr_frags; in skb_try_coalesce()
6080 from_shinfo->nr_frags = 0; in skb_try_coalesce()
6086 for (i = 0; i < from_shinfo->nr_frags; i++) in skb_try_coalesce()
6087 __skb_frag_ref(&from_shinfo->frags[i]); in skb_try_coalesce()
6090 to->truesize += delta; in skb_try_coalesce()
6091 to->len += len; in skb_try_coalesce()
6092 to->data_len += len; in skb_try_coalesce()
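
Receive-queue sketch in the tcp_try_coalesce() mould: if @from merges into the previous skb, the caller must account the extra truesize reported in @delta and free @from with kfree_skb_partial(), since its head may have been stolen. The wrapper name is illustrative.

        #include <net/sock.h>
        #include <linux/skbuff.h>

        static bool rxq_try_merge(struct sock *sk, struct sk_buff *to,
                                  struct sk_buff *from)
        {
                bool fragstolen;
                int delta;

                if (!skb_try_coalesce(to, from, &fragstolen, &delta))
                        return false;

                atomic_add(delta, &sk->sk_rmem_alloc);  /* charge the growth of @to */
                sk_mem_charge(sk, delta);
                kfree_skb_partial(from, fragstolen);    /* skips the stolen head */
                return true;
        }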
6100 * skb_scrub_packet - scrub an skb
6114 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
6115 skb->skb_iif = 0; in skb_scrub_packet()
6116 skb->ignore_df = 0; in skb_scrub_packet()
6123 skb->offload_fwd_mark = 0; in skb_scrub_packet()
6124 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
6131 skb->mark = 0; in skb_scrub_packet()
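
A one-liner sketch of the tunnel-crossing use: when the packet moves to a device in another network namespace, xnet must be true so namespace-local state (including skb->mark) is cleared. The helper name is hypothetical.

        #include <linux/skbuff.h>
        #include <net/net_namespace.h>

        static void prepare_cross_ns_xmit(struct sk_buff *skb,
                                          struct net *src_net, struct net *dst_net)
        {
                /* xnet == true only when the namespaces actually differ */
                skb_scrub_packet(skb, !net_eq(src_net, dst_net));
        }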
6146 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
6149 mac_len - VLAN_HLEN - ETH_TLEN); in skb_reorder_vlan_header()
6154 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
6158 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
6168 /* vlan_tci is already set up so leave this for another time */ in skb_vlan_untag()
6179 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
6180 vlan_tci = ntohs(vhdr->h_vlan_TCI); in skb_vlan_untag()
6181 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
6206 return -ENOMEM; in skb_ensure_writable()
6209 return -EFAULT; in skb_ensure_writable()
6220 int needed_headroom = dev->needed_headroom; in skb_ensure_writable_head_tail()
6221 int needed_tailroom = dev->needed_tailroom; in skb_ensure_writable_head_tail()
6228 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6229 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6231 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6232 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
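
Rewrite-path sketch for skb_ensure_writable() (openvswitch set_eth-like, assuming skb->data sits at the mac header): the helper unclones and linearizes the region first, and the rcsum helpers keep CHECKSUM_COMPLETE consistent across the edit. The function name is illustrative.

        #include <linux/skbuff.h>
        #include <linux/etherdevice.h>

        static int set_eth_dst(struct sk_buff *skb, const u8 *new_dmac)
        {
                int err;

                err = skb_ensure_writable(skb, ETH_HLEN);
                if (err)
                        return err;

                /* Pull the old MAC addresses out of skb->csum, rewrite,
                 * then push the new bytes back in.
                 */
                skb_postpull_rcsum(skb, eth_hdr(skb), 2 * ETH_ALEN);
                ether_addr_copy(eth_hdr(skb)->h_dest, new_dmac);
                skb_postpush_rcsum(skb, eth_hdr(skb), 2 * ETH_ALEN);

                return 0;
        }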
6248 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6252 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6254 return -EINVAL; in __skb_vlan_pop()
6261 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6265 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6277 * Expects skb->data at mac header.
6288 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6296 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6299 vlan_proto = skb->protocol; in skb_vlan_pop()
6310 * Expects skb->data at mac header.
6315 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6319 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6321 return -EINVAL; in skb_vlan_push()
6324 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6329 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6330 skb->network_header -= VLAN_HLEN; in skb_vlan_push()
6332 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
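
Sketch of a retag operation in the act_vlan / openvswitch style; both helpers expect skb->data at the mac header, and the tag pushed here ends up in the skb's hwaccel metadata rather than in the payload. The VID handling is illustrative.

        #include <linux/skbuff.h>
        #include <linux/if_vlan.h>

        static int retag_packet(struct sk_buff *skb, u16 new_vid)
        {
                int err;

                /* Strips the hwaccel tag or a payload tag; no-op if untagged. */
                err = skb_vlan_pop(skb);
                if (err)
                        return err;

                return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
        }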
6340 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6346 * Expects that skb->data points to the mac header and that no VLAN tags are
6349 * Returns 0 on success, -errno otherwise.
6355 return -EPROTO; in skb_eth_pop()
6366 * skb_eth_push() - Add a new Ethernet header at the head of a packet
6374 * Expects that skb->data points to the mac header, which must be empty.
6376 * Returns 0 on success, -errno otherwise.
6385 return -EPROTO; in skb_eth_push()
6396 ether_addr_copy(eth->h_dest, dst); in skb_eth_push()
6397 ether_addr_copy(eth->h_source, src); in skb_eth_push()
6398 eth->h_proto = skb->protocol; in skb_eth_push()
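
Decap/re-encap sketch built on the two helpers above (roughly what the tc vlan pop_eth/push_eth actions do): skb->data must be at the mac header, no VLAN tag may be present, and the new header's ethertype is taken from skb->protocol. The wrapper is illustrative.

        #include <linux/skbuff.h>

        static int swap_mac_header(struct sk_buff *skb,
                                   const u8 *new_dst, const u8 *new_src)
        {
                int err;

                err = skb_eth_pop(skb);         /* drop the current MAC header */
                if (err)
                        return err;

                return skb_eth_push(skb, new_dst, new_src);
        }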
6410 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6411 __be16 diff[] = { ~hdr->h_proto, ethertype }; in skb_mod_eth_type()
6413 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6416 hdr->h_proto = ethertype; in skb_mod_eth_type()
6420 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6430 * Expects skb->data at mac header.
6432 * Returns 0 on success, -errno otherwise.
6441 return -EINVAL; in skb_mpls_push()
6444 if (skb->encapsulation) in skb_mpls_push()
6445 return -EINVAL; in skb_mpls_push()
6451 if (!skb->inner_protocol) { in skb_mpls_push()
6453 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6457 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6464 lse->label_stack_entry = mpls_lse; in skb_mpls_push()
6469 skb->protocol = mpls_proto; in skb_mpls_push()
6476 * skb_mpls_pop() - pop the outermost MPLS header
6483 * Expects skb->data at mac header.
6485 * Returns 0 on success, -errno otherwise.
6492 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6511 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6514 skb->protocol = next_proto; in skb_mpls_pop()
6521 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6526 * Expects skb->data at mac header.
6528 * Returns 0 on success, -errno otherwise.
6534 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6535 return -EINVAL; in skb_mpls_update_lse()
6537 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6541 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6542 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6544 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6547 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6554 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6558 * Expects skb->data at mac header.
6560 * Returns 0 on success, -errno otherwise.
6567 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6568 return -EINVAL; in skb_mpls_dec_ttl()
6571 return -ENOMEM; in skb_mpls_dec_ttl()
6573 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6575 if (!--ttl) in skb_mpls_dec_ttl()
6576 return -EINVAL; in skb_mpls_dec_ttl()
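
Encap/decap sketch for the MPLS helpers above, in the openvswitch push_mpls/pop_mpls spirit (skb_mpls_dec_ttl() is the matching per-hop operation); the label/TTL encoding and the Ethernet test are illustrative assumptions.

        #include <linux/skbuff.h>
        #include <linux/if_ether.h>
        #include <linux/if_arp.h>
        #include <linux/mpls.h>

        static int mpls_encap(struct sk_buff *skb, u32 label, u8 ttl)
        {
                /* Build a single, bottom-of-stack label stack entry. */
                __be32 lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
                                         (1 << MPLS_LS_S_SHIFT) |
                                         (ttl << MPLS_LS_TTL_SHIFT));

                return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC), skb->mac_len,
                                     skb->dev->type == ARPHRD_ETHER);
        }

        static int mpls_decap(struct sk_buff *skb, __be16 inner_proto)
        {
                /* @inner_proto tells the stack what follows, e.g. htons(ETH_P_IP) */
                return skb_mpls_pop(skb, inner_proto, skb->mac_len,
                                    skb->dev->type == ARPHRD_ETHER);
        }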
6586 * alloc_skb_with_frags - allocate skb with page frags
6607 *errcode = -EMSGSIZE; in alloc_skb_with_frags()
6611 *errcode = -ENOBUFS; in alloc_skb_with_frags()
6617 if (nr_frags == MAX_SKB_FRAGS - 1) in alloc_skb_with_frags()
6620 order--; in alloc_skb_with_frags()
6628 order--; in alloc_skb_with_frags()
6640 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6641 data_len -= chunk; in alloc_skb_with_frags()
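
Allocation sketch: keep only headers in the linear area and let the payload live in page frags. Passing 0 for the page-order argument asks for plain order-0 pages; the wrapper and the ERR_PTR convention are illustrative.

        #include <linux/skbuff.h>
        #include <linux/err.h>

        static struct sk_buff *alloc_fragged_skb(unsigned long header_len,
                                                 unsigned long data_len, gfp_t gfp)
        {
                struct sk_buff *skb;
                int err;

                skb = alloc_skb_with_frags(header_len, data_len, 0, &err, gfp);
                if (!skb)
                        return ERR_PTR(err);    /* -EMSGSIZE or -ENOBUFS */

                /* Reserve the whole linear area for protocol headers. */
                skb_reserve(skb, header_len);
                return skb;
        }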
6657 int new_hlen = headlen - off; in pskb_carve_inside_header()
6665 return -ENOMEM; in pskb_carve_inside_header()
6670 skb->len -= off; in pskb_carve_inside_header()
6675 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6680 return -ENOMEM; in pskb_carve_inside_header()
6682 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6688 /* we can reuse the existing refcount - all we did was in pskb_carve_inside_header()
6694 skb->head = data; in pskb_carve_inside_header()
6695 skb->data = data; in pskb_carve_inside_header()
6696 skb->head_frag = 0; in pskb_carve_inside_header()
6700 skb->cloned = 0; in pskb_carve_inside_header()
6701 skb->hdr_len = 0; in pskb_carve_inside_header()
6702 skb->nohdr = 0; in pskb_carve_inside_header()
6703 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6717 struct sk_buff *list = shinfo->frag_list; in pskb_carve_frag_list()
6724 return -EFAULT; in pskb_carve_frag_list()
6726 if (list->len <= eat) { in pskb_carve_frag_list()
6728 eat -= list->len; in pskb_carve_frag_list()
6729 list = list->next; in pskb_carve_frag_list()
6736 return -ENOMEM; in pskb_carve_frag_list()
6737 insp = list->next; in pskb_carve_frag_list()
6745 return -ENOMEM; in pskb_carve_frag_list()
6752 while ((list = shinfo->frag_list) != insp) { in pskb_carve_frag_list()
6753 shinfo->frag_list = list->next; in pskb_carve_frag_list()
6758 clone->next = list; in pskb_carve_frag_list()
6759 shinfo->frag_list = clone; in pskb_carve_frag_list()
6765 * non-linear part of skb
6773 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6781 return -ENOMEM; in pskb_carve_inside_nonlinear()
6788 return -ENOMEM; in pskb_carve_inside_nonlinear()
6792 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6795 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6806 skb_frag_off_add(&shinfo->frags[0], off - pos); in pskb_carve_inside_nonlinear()
6807 skb_frag_size_sub(&shinfo->frags[0], off - pos); in pskb_carve_inside_nonlinear()
6814 shinfo->nr_frags = k; in pskb_carve_inside_nonlinear()
6819 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6820 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ in pskb_carve_inside_nonlinear()
6822 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6824 return -ENOMEM; in pskb_carve_inside_nonlinear()
6828 skb->head = data; in pskb_carve_inside_nonlinear()
6829 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6830 skb->data = data; in pskb_carve_inside_nonlinear()
6834 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6835 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6836 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6837 skb->len -= off; in pskb_carve_inside_nonlinear()
6838 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6839 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6875 * skb_condense - try to get rid of fragments/frag_list if possible
6879 * If packet has bytes in frags and enough tail room in skb->head,
6883 * We do not reallocate skb->head thus can not fail.
6884 * Caller must re-evaluate skb->truesize if needed.
6888 if (skb->data_len) { in skb_condense()
6889 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6894 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6896 /* At this point, skb->truesize might be overestimated, in skb_condense()
6899 * When we pulled its content into skb->head, fragment in skb_condense()
6901 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
6903 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
6910 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); in skb_ext_get_ptr()
6914 * __skb_ext_alloc - allocate a new skb extensions storage
6927 memset(new->offset, 0, sizeof(new->offset)); in __skb_ext_alloc()
6928 refcount_set(&new->refcnt, 1); in __skb_ext_alloc()
6939 if (refcount_read(&old->refcnt) == 1) in skb_ext_maybe_cow()
6946 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); in skb_ext_maybe_cow()
6947 refcount_set(&new->refcnt, 1); in skb_ext_maybe_cow()
6954 for (i = 0; i < sp->len; i++) in skb_ext_maybe_cow()
6955 xfrm_state_hold(sp->xvec[i]); in skb_ext_maybe_cow()
6962 if (flow->key) in skb_ext_maybe_cow()
6963 refcount_inc(&flow->key->refs); in skb_ext_maybe_cow()
6971 * __skb_ext_set - attach the specified extension storage to this skb
6987 ext->chunks = newlen; in __skb_ext_set()
6988 ext->offset[id] = newoff; in __skb_ext_set()
6989 skb->extensions = ext; in __skb_ext_set()
6990 skb->active_extensions = 1 << id; in __skb_ext_set()
6995 * skb_ext_add - allocate space for given extension, COW if needed
7013 if (skb->active_extensions) { in skb_ext_add()
7014 old = skb->extensions; in skb_ext_add()
7016 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
7023 newoff = new->chunks; in skb_ext_add()
7033 new->chunks = newlen; in skb_ext_add()
7034 new->offset[id] = newoff; in skb_ext_add()
7036 skb->slow_gro = 1; in skb_ext_add()
7037 skb->extensions = new; in skb_ext_add()
7038 skb->active_extensions |= 1 << id; in skb_ext_add()
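
Usage sketch for skb_ext_add(), assuming CONFIG_NET_TC_SKB_EXT: attach (or COW) the tc extension and record a chain index for a later classifier pass. skb_ext_add() does not zero new storage, hence the memset; the wrapper name is illustrative.

        #include <linux/skbuff.h>
        #include <linux/string.h>

        static int record_tc_chain(struct sk_buff *skb, u32 chain)
        {
                struct tc_skb_ext *ext;

                /* COWs the extension area if it is shared with a clone. */
                ext = skb_ext_add(skb, TC_SKB_EXT);
                if (!ext)
                        return -ENOMEM;

                memset(ext, 0, sizeof(*ext));
                ext->chain = chain;

                return 0;
        }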
7048 for (i = 0; i < sp->len; i++) in skb_ext_put_sp()
7049 xfrm_state_put(sp->xvec[i]); in skb_ext_put_sp()
7056 if (flow->key) in skb_ext_put_mctp()
7057 mctp_key_unref(flow->key); in skb_ext_put_mctp()
7063 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
7065 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
7066 if (skb->active_extensions == 0) { in __skb_ext_del()
7067 skb->extensions = NULL; in __skb_ext_del()
7071 refcount_read(&ext->refcnt) == 1) { in __skb_ext_del()
7075 sp->len = 0; in __skb_ext_del()
7086 if (refcount_read(&ext->refcnt) == 1) in __skb_ext_put()
7089 if (!refcount_dec_and_test(&ext->refcnt)) in __skb_ext_put()
7109 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in kfree_skb_napi_cache()
7120 * skb_attempt_defer_free - queue skb for remote freeing
7123 * Put @skb in a per-cpu list, using the cpu which
7129 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
7142 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
7146 if (READ_ONCE(sd->defer_count) >= defer_max) in skb_attempt_defer_free()
7149 spin_lock_bh(&sd->defer_lock); in skb_attempt_defer_free()
7151 kick = sd->defer_count == (defer_max >> 1); in skb_attempt_defer_free()
7153 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); in skb_attempt_defer_free()
7155 skb->next = sd->defer_list; in skb_attempt_defer_free()
7157 WRITE_ONCE(sd->defer_list, skb); in skb_attempt_defer_free()
7158 spin_unlock_bh(&sd->defer_lock); in skb_attempt_defer_free()
7176 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
7180 * skb_splice_from_iter - Splice (or copy) pages to skbuff
7191 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
7202 while (iter->count > 0) { in skb_splice_from_iter()
7206 ret = -EMSGSIZE; in skb_splice_from_iter()
7207 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
7216 ret = len ?: -EIO; in skb_splice_from_iter()
7223 size_t part = min_t(size_t, PAGE_SIZE - off, len); in skb_splice_from_iter()
7225 ret = -EIO; in skb_splice_from_iter()
7236 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7241 maxsize -= part; in skb_splice_from_iter()
7242 len -= part; in skb_splice_from_iter()
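
Send-path sketch in the MSG_SPLICE_PAGES style used by TCP: the pages referenced by the iterator are attached as frags instead of being copied. The socket-level memory accounting below mirrors the TCP caller and is part of the sketch, not of the helper itself.

        #include <net/sock.h>
        #include <linux/skbuff.h>
        #include <linux/socket.h>

        static ssize_t splice_payload(struct sock *sk, struct sk_buff *skb,
                                      struct msghdr *msg, size_t limit)
        {
                ssize_t spliced;

                spliced = skb_splice_from_iter(skb, &msg->msg_iter, limit,
                                               sk->sk_allocation);
                if (spliced < 0)
                        return spliced; /* e.g. -EMSGSIZE once the frag array is full */

                sk_wmem_queued_add(sk, spliced);
                sk_mem_charge(sk, spliced);

                return spliced;
        }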
7282 if (WARN_ON_ONCE(!i->data_source)) in csum_and_copy_from_iter_full()