/linux-6.12.1/net/tls/ |
D | tls_strp.c |
     32  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_anchor_free() local
     34  DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);  in tls_strp_anchor_free()
     36  shinfo->frag_list = NULL;  in tls_strp_anchor_free()
    147  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_msg_hold() local
    152  WARN_ON_ONCE(!shinfo->nr_frags);  in tls_strp_msg_hold()
    167  iter = shinfo->frag_list;  in tls_strp_msg_hold()
    194  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_flush_anchor_copy() local
    197  DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);  in tls_strp_flush_anchor_copy()
    199  for (i = 0; i < shinfo->nr_frags; i++)  in tls_strp_flush_anchor_copy()
    200  __skb_frag_unref(&shinfo->frags[i], false);  in tls_strp_flush_anchor_copy()
    [all …]
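
The tls_strp hits all revolve around one invariant: the strparser's anchor skb must own its data exclusively (dataref == 1) before its shared info is torn down. A minimal sketch of that flush pattern, assuming exclusive ownership; the helper name is hypothetical, not the kernel function itself:

    #include <linux/skbuff.h>
    #include <linux/skbuff_ref.h>

    /* Hypothetical sketch of the anchor-flush pattern above: only safe
     * when no clone shares the data, which the DEBUG_NET_WARN_ON_ONCE
     * lines assert via dataref == 1.
     */
    static void sketch_flush_anchor(struct sk_buff *anchor)
    {
        struct skb_shared_info *shinfo = skb_shinfo(anchor);
        int i;

        WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

        for (i = 0; i < shinfo->nr_frags; i++)
            __skb_frag_unref(&shinfo->frags[i], false); /* drop page refs */
        shinfo->nr_frags = 0;
        shinfo->frag_list = NULL; /* chained skbs are managed elsewhere */
    }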
|
/linux-6.12.1/net/core/ |
D | gso.c |
    145  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_gso_transport_seglen() local
    152  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))  in skb_gso_transport_seglen()
    154  } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {  in skb_gso_transport_seglen()
    158  } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {  in skb_gso_transport_seglen()
    165  return thlen + shinfo->gso_size;  in skb_gso_transport_seglen()
    226  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_gso_size_check() local
    229  if (shinfo->gso_size != GSO_BY_FRAGS)  in skb_gso_size_check()
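
The two identical-looking TCPV4|TCPV6 tests at 152 and 154 are the encapsulated and plain branches of the same function; either way the result reduces to "transport header length plus one gso_size payload chunk". A simplified sketch of the non-encapsulated case (the real function also accounts for tunnel headers):

    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <linux/udp.h>

    /* Simplified restatement of the seglen logic hinted at above: one
     * on-the-wire segment is the transport header plus gso_size bytes
     * of payload. Not the exact kernel function body.
     */
    static unsigned int sketch_gso_transport_seglen(const struct sk_buff *skb)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int thlen = 0;

        if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
            thlen = tcp_hdrlen(skb);       /* TCP header, incl. options */
        else if (shinfo->gso_type & SKB_GSO_UDP_L4)
            thlen = sizeof(struct udphdr); /* UDP header is fixed size */

        return thlen + shinfo->gso_size;
    }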
|
D | lwt_bpf.c |
    528  struct skb_shared_info *shinfo = skb_shinfo(skb);  in handle_gso_type() local
    531  shinfo->gso_type |= gso_type;  in handle_gso_type()
    532  skb_decrease_gso_size(shinfo, encap_len);  in handle_gso_type()
    533  shinfo->gso_segs = 0;  in handle_gso_type()
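
handle_gso_type() shows the standard fixup after a BPF program grows the packet with an encapsulation header: mark the new gso_type, shrink gso_size by the bytes that became header, and zero gso_segs so the stack recomputes the segment count. A sketch under those assumptions (helper name hypothetical):

    #include <linux/skbuff.h>

    /* Sketch of the GSO metadata fixup after adding encap_len bytes of
     * outer header, following the handle_gso_type() pattern above.
     */
    static void sketch_fixup_gso_after_encap(struct sk_buff *skb,
                                             unsigned int gso_type,
                                             unsigned int encap_len)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        shinfo->gso_type |= gso_type;             /* e.g. SKB_GSO_IPXIP6 */
        skb_decrease_gso_size(shinfo, encap_len); /* payload per seg shrinks */
        shinfo->gso_segs = 0;                     /* force recomputation */
    }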
|
D | skbuff.c |
     373  struct skb_shared_info *shinfo;  in __finalize_skb_around() local
     388  shinfo = skb_shinfo(skb);  in __finalize_skb_around()
     389  memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));  in __finalize_skb_around()
     390  atomic_set(&shinfo->dataref, 1);  in __finalize_skb_around()
    1062  struct skb_shared_info *shinfo;  in skb_pp_frag_ref() local
    1069  shinfo = skb_shinfo(skb);  in skb_pp_frag_ref()
    1071  for (i = 0; i < shinfo->nr_frags; i++) {  in skb_pp_frag_ref()
    1072  head_netmem = netmem_compound_head(shinfo->frags[i].netmem);  in skb_pp_frag_ref()
    1104  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_release_data() local
    1107  if (!skb_data_unref(skb, shinfo))  in skb_release_data()
    [all …]
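
__finalize_skb_around() shows how a fresh skb's shared info is initialized: the struct is laid out so that every field needing zeroing precedes dataref, so one memset covers them all, and dataref then starts at 1. skb_release_data() later drops that reference through skb_data_unref(). Sketch of the init step:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Sketch of the shared-info init pattern from __finalize_skb_around():
     * zero every field before dataref, then publish one data reference.
     */
    static void sketch_init_shinfo(struct sk_buff *skb)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
    }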
|
/linux-6.12.1/tools/testing/selftests/kvm/x86_64/ |
D | xen_shinfo_test.c |
    384  static struct shared_info *shinfo;  variable
    412  .u.shared_info.hva = (unsigned long)shinfo  in juggle_shinfo_state()
    464  shinfo = addr_gpa2hva(vm, SHINFO_VADDR);  in main()
    504  ha.u.shared_info.hva = (unsigned long)shinfo;  in main()
    517  struct pvclock_wall_clock wc_copy = shinfo->wc;  in main()
    518  void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);  in main()
    519  TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info");  in main()
    520  shinfo->wc = wc_copy;  in main()
    714  shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;  in main()
    723  shinfo->evtchn_pending[0] = 0;  in main()
    [all …]
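
The selftest pokes the guest-visible shared_info page directly from host userspace: saving wc across a remap, and flipping per-port bits in evtchn_mask/evtchn_pending. A userspace sketch with a simplified stand-in layout (the real struct shared_info comes from the Xen public headers; the port number here is hypothetical, for illustration only):

    #include <stdint.h>

    /* Simplified stand-in for the two shared_info fields the test
     * touches; the real layout lives in the Xen public headers.
     */
    struct fake_shared_info {
        uint64_t evtchn_pending[64];
        uint64_t evtchn_mask[64];
    };

    #define TEST_PORT 15 /* hypothetical event-channel port */

    static void mask_and_clear(struct fake_shared_info *shinfo)
    {
        shinfo->evtchn_mask[0] = 1ULL << TEST_PORT; /* mask the port */
        shinfo->evtchn_pending[0] = 0;              /* clear pending */
    }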
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/ |
D | en_tx.c |
    606  const struct skb_shared_info *shinfo,  in is_inline() argument
    614  if (shinfo->nr_frags == 1) {  in is_inline()
    615  ptr = skb_frag_address_safe(&shinfo->frags[0]);  in is_inline()
    621  if (shinfo->nr_frags)  in is_inline()
    638  const struct skb_shared_info *shinfo,  in get_real_size() argument
    648  if (shinfo->gso_size) {  in get_real_size()
    662  real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +  in get_real_size()
    678  shinfo, pfrag);  in get_real_size()
    684  (shinfo->nr_frags + 1) * DS_SIZE;  in get_real_size()
    692  const struct skb_shared_info *shinfo,  in build_inline_wqe() argument
    [all …]
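
is_inline() decides whether the whole packet is small enough to be copied into the work-queue entry instead of DMA-mapped; the nr_frags checks show it only tolerates zero or one fragment, and that one fragment must be addressable. A hedged sketch of the heuristic (inline_thold stands for the driver's tunable threshold; the exact guards in the driver differ):

    #include <linux/skbuff.h>

    /* Sketch of the inline decision visible in is_inline(): packets
     * under the threshold with at most one addressable fragment are
     * copied into the descriptor instead of being DMA-mapped.
     */
    static bool sketch_is_inline(int inline_thold, const struct sk_buff *skb)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        void *ptr;

        if (skb->len > inline_thold)
            return false;

        if (shinfo->nr_frags == 1) {
            ptr = skb_frag_address_safe(&shinfo->frags[0]);
            return ptr != NULL;     /* highmem frag: cannot copy inline */
        }

        return shinfo->nr_frags == 0;   /* linear-only packets qualify */
    }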
|
/linux-6.12.1/drivers/net/xen-netback/ |
D | netback.c |
    386  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xenvif_get_requests() local
    387  skb_frag_t *frags = shinfo->frags;  in xenvif_get_requests()
    395  nr_slots = shinfo->nr_frags + frag_overflow + 1;  in xenvif_get_requests()
    461  for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;  in xenvif_get_requests()
    473  frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);  in xenvif_get_requests()
    474  ++shinfo->nr_frags;  in xenvif_get_requests()
    485  shinfo = skb_shinfo(nskb);  in xenvif_get_requests()
    486  frags = shinfo->frags;  in xenvif_get_requests()
    488  for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {  in xenvif_get_requests()
    499  frag_set_pending_idx(&frags[shinfo->nr_frags],  in xenvif_get_requests()
    [all …]
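
xenvif_get_requests() budgets one slot per fragment plus the linear head (nr_slots = nr_frags + frag_overflow + 1) and fills shinfo->frags until MAX_SKB_FRAGS, spilling the remainder into a second skb (nskb). A sketch of the fill loop's shape; frag_set_pending_idx() is a netback-local helper, so the per-slot bookkeeping is only commented here:

    #include <linux/skbuff.h>

    /* Sketch of the frag fill loop in xenvif_get_requests(): consume TX
     * ring slots until the skb's frag array is full; leftover slots go
     * to an overflow skb.
     */
    static unsigned int sketch_fill_frags(struct sk_buff *skb,
                                          unsigned int nr_slots)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        for (shinfo->nr_frags = 0;
             nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
             nr_slots--) {
            /* the driver records each grant's pending-ring index via
             * frag_set_pending_idx(&shinfo->frags[shinfo->nr_frags], ...)
             */
            ++shinfo->nr_frags;
        }
        return nr_slots;    /* slots still to place in the overflow skb */
    }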
|
/linux-6.12.1/include/linux/ |
D | virtio_net.h |
    159  struct skb_shared_info *shinfo = skb_shinfo(skb);  in virtio_net_hdr_to_skb() local
    190  shinfo->gso_size = gso_size;  in virtio_net_hdr_to_skb()
    191  shinfo->gso_type = gso_type;  in virtio_net_hdr_to_skb()
    194  shinfo->gso_type |= SKB_GSO_DODGY;  in virtio_net_hdr_to_skb()
    195  shinfo->gso_segs = 0;  in virtio_net_hdr_to_skb()
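
These virtio_net.h hits are a trust boundary: GSO metadata copied from an untrusted virtio header is applied to the skb, then tagged SKB_GSO_DODGY with gso_segs = 0 so the stack revalidates and recomputes segmentation before transmit. Sketch of that hand-off (helper name hypothetical):

    #include <linux/skbuff.h>

    /* Sketch of the metadata hand-off in virtio_net_hdr_to_skb(): apply
     * guest-supplied GSO parameters, but mark them DODGY so the GSO
     * layer re-checks them before segmenting.
     */
    static void sketch_apply_untrusted_gso(struct sk_buff *skb,
                                           u16 gso_size, unsigned int gso_type)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        shinfo->gso_size = gso_size;
        shinfo->gso_type = gso_type;        /* e.g. SKB_GSO_TCPV4 */
        shinfo->gso_type |= SKB_GSO_DODGY;  /* untrusted source */
        shinfo->gso_segs = 0;               /* recomputed on transmit */
    }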
|
D | skbuff_ref.h |
    68  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_frag_unref() local
    71  __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);  in skb_frag_unref()
|
D | skbuff.h |
    1239  struct skb_shared_info *shinfo)  in skb_data_unref() argument
    1248  if (atomic_read(&shinfo->dataref) == bias)  in skb_data_unref()
    1250  else if (atomic_sub_return(bias, &shinfo->dataref))  in skb_data_unref()
    2506  static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,  in __skb_fill_netmem_desc_noacc() argument
    2510  skb_frag_t *frag = &shinfo->frags[i];  in __skb_fill_netmem_desc_noacc()
    2515  static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,  in __skb_fill_page_desc_noacc() argument
    2519  __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,  in __skb_fill_page_desc_noacc()
    2619  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_fill_page_desc_noacc() local
    2621  __skb_fill_page_desc_noacc(shinfo, i, page, off, size);  in skb_fill_page_desc_noacc()
    2622  shinfo->nr_frags = i + 1;  in skb_fill_page_desc_noacc()
    [all …]
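
skb_data_unref() is the key to several other hits in this listing: dataref is really two counters packed into one atomic, split at SKB_DATAREF_SHIFT, so header-only clones (skb->nohdr) and full clones can be released independently. A hedged restatement of the excerpted logic:

    #include <linux/skbuff.h>

    /* Hedged restatement of the skb_data_unref() logic excerpted above:
     * returns true when the caller held the last reference and may free
     * the data. dataref packs two counts, split at SKB_DATAREF_SHIFT.
     */
    static bool sketch_data_unref(const struct sk_buff *skb,
                                  struct skb_shared_info *shinfo)
    {
        int bias;

        if (!skb->cloned)
            return true;    /* sole owner, no accounting needed */

        /* header-only clones contribute in the upper half of dataref */
        bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

        if (atomic_read(&shinfo->dataref) == bias)
            smp_rmb();      /* we are last: order reads before freeing */
        else if (atomic_sub_return(bias, &shinfo->dataref))
            return false;   /* other users remain */

        return true;
    }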
|
/linux-6.12.1/drivers/net/ethernet/fungible/funeth/ |
D | funeth_tx.c |
    154  const struct skb_shared_info *shinfo;  in write_pkt_desc() local
    164  shinfo = skb_shinfo(skb);  in write_pkt_desc()
    165  if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,  in write_pkt_desc()
    179  if (likely(shinfo->gso_size)) {  in write_pkt_desc()
    186  if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |  in write_pkt_desc()
    190  if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)  in write_pkt_desc()
    213  shinfo->gso_size,  in write_pkt_desc()
    219  } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {  in write_pkt_desc()
    232  shinfo->gso_size,  in write_pkt_desc()
    242  if (shinfo->gso_type & SKB_GSO_TCPV6)  in write_pkt_desc()
    [all …]
|
/linux-6.12.1/drivers/net/ethernet/marvell/octeon_ep/ |
D | octep_tx.c |
     37  struct skb_shared_info *shinfo;  in octep_iq_process_completions() local
     66  shinfo = skb_shinfo(skb);  in octep_iq_process_completions()
     67  frags = shinfo->nr_frags;  in octep_iq_process_completions()
    106  struct skb_shared_info *shinfo;  in octep_iq_free_pending() local
    127  shinfo = skb_shinfo(skb);  in octep_iq_free_pending()
    128  frags = shinfo->nr_frags;  in octep_iq_free_pending()
|
D | octep_rx.c |
    456  struct skb_shared_info *shinfo;  in __octep_oq_process_rx() local
    463  shinfo = skb_shinfo(skb);  in __octep_oq_process_rx()
    476  skb_add_rx_frag(skb, shinfo->nr_frags,  in __octep_oq_process_rx()
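
Both this RX path and the octep_vf one below use the standard pattern for multi-buffer receives: append each additional hardware buffer to the head skb with skb_add_rx_frag(), indexed by the current nr_frags. A minimal sketch (the buffer parameters are hypothetical values taken from the RX ring):

    #include <linux/skbuff.h>

    /* Sketch of the multi-buffer RX pattern from __octep_oq_process_rx():
     * each extra RX buffer page becomes one fragment of the head skb.
     */
    static void sketch_append_rx_buffer(struct sk_buff *skb, struct page *page,
                                        unsigned int offset, unsigned int len,
                                        unsigned int buf_size)
    {
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        page, offset, len, buf_size);
        /* skb_add_rx_frag() updates skb->len, data_len and truesize */
    }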
|
/linux-6.12.1/drivers/net/ethernet/marvell/octeon_ep_vf/ |
D | octep_vf_tx.c |
     38  struct skb_shared_info *shinfo;  in octep_vf_iq_process_completions() local
     67  shinfo = skb_shinfo(skb);  in octep_vf_iq_process_completions()
     68  frags = shinfo->nr_frags;  in octep_vf_iq_process_completions()
    105  struct skb_shared_info *shinfo;  in octep_vf_iq_free_pending() local
    126  shinfo = skb_shinfo(skb);  in octep_vf_iq_free_pending()
    127  frags = shinfo->nr_frags;  in octep_vf_iq_free_pending()
|
D | octep_vf_rx.c |
    408  struct skb_shared_info *shinfo;  in __octep_vf_oq_process_rx() local
    422  shinfo = skb_shinfo(skb);  in __octep_vf_oq_process_rx()
    437  skb_add_rx_frag(skb, shinfo->nr_frags,  in __octep_vf_oq_process_rx()
|
D | octep_vf_main.c |
    603  struct skb_shared_info *shinfo;  in octep_vf_start_xmit() local
    622  shinfo = skb_shinfo(skb);  in octep_vf_start_xmit()
    623  nr_frags = shinfo->nr_frags;  in octep_vf_start_xmit()
    666  frag = &shinfo->frags[0];  in octep_vf_start_xmit()
|
/linux-6.12.1/drivers/net/ethernet/google/gve/ |
D | gve_tx_dqo.c |
    629  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in gve_tx_add_skb_no_copy_dqo() local
    656  /*eop=*/shinfo->nr_frags == 0, is_gso);  in gve_tx_add_skb_no_copy_dqo()
    659  for (i = 0; i < shinfo->nr_frags; i++) {  in gve_tx_add_skb_no_copy_dqo()
    660  const skb_frag_t *frag = &shinfo->frags[i];  in gve_tx_add_skb_no_copy_dqo()
    661  bool is_eop = i == (shinfo->nr_frags - 1);  in gve_tx_add_skb_no_copy_dqo()
    840  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in gve_num_buffer_descs_needed() local
    846  for (i = 0; i < shinfo->nr_frags; i++) {  in gve_num_buffer_descs_needed()
    847  unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);  in gve_num_buffer_descs_needed()
    865  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in gve_can_send_tso() local
    867  const int gso_size = shinfo->gso_size;  in gve_can_send_tso()
    [all …]
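
gve_num_buffer_descs_needed() and gve_can_send_tso() show the admission logic: count descriptors per fragment plus the linear head, and for TSO verify that any gso_size worth of payload fits in a bounded number of descriptors. A simplified sketch of the descriptor count (max_buf_size is a hypothetical device limit; the real code uses the DQO buffer-size constant):

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    /* Simplified descriptor budgeting in the spirit of
     * gve_num_buffer_descs_needed(): the linear part and each fragment
     * cost ceil(size / max_buf_size) descriptors, so oversized
     * fragments cost several.
     */
    static int sketch_num_buffer_descs(const struct sk_buff *skb,
                                       unsigned int max_buf_size)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i, num_descs;

        num_descs = DIV_ROUND_UP(skb_headlen(skb), max_buf_size);
        for (i = 0; i < shinfo->nr_frags; i++) {
            unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);

            num_descs += DIV_ROUND_UP(frag_size, max_buf_size);
        }
        return num_descs;
    }

The non-DQO path in gve_tx.c below budgets simply num_descriptors = 1 + shinfo->nr_frags, since it never splits a fragment across descriptors.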
|
D | gve_tx.c |
    643  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in gve_tx_add_skb_no_copy() local
    676  num_descriptors = 1 + shinfo->nr_frags;  in gve_tx_add_skb_no_copy()
    705  for (i = 0; i < shinfo->nr_frags; i++) {  in gve_tx_add_skb_no_copy()
    706  const skb_frag_t *frag = &shinfo->frags[i];  in gve_tx_add_skb_no_copy()
    728  i += num_descriptors - shinfo->nr_frags;  in gve_tx_add_skb_no_copy()
|
/linux-6.12.1/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_xdp.c |
    206  struct skb_shared_info *shinfo;  in bnxt_xdp_buff_frags_free() local
    211  shinfo = xdp_get_shared_info_from_buff(xdp);  in bnxt_xdp_buff_frags_free()
    212  for (i = 0; i < shinfo->nr_frags; i++) {  in bnxt_xdp_buff_frags_free()
    213  struct page *page = skb_frag_page(&shinfo->frags[i]);  in bnxt_xdp_buff_frags_free()
    217  shinfo->nr_frags = 0;  in bnxt_xdp_buff_frags_free()
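
bnxt_xdp_buff_frags_free() walks the shared info attached to a multi-buffer xdp_buff and releases each fragment page, then zeroes nr_frags so the buffer cannot be double-freed. A sketch using plain page refcounting; the driver itself recycles into its page pool instead:

    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <net/xdp.h>

    /* Sketch of freeing the fragments of a multi-buffer xdp_buff, after
     * the bnxt_xdp_buff_frags_free() pattern. put_page() stands in for
     * the driver's page-pool recycling.
     */
    static void sketch_xdp_frags_free(struct xdp_buff *xdp)
    {
        struct skb_shared_info *shinfo;
        int i;

        if (!xdp_buff_has_frags(xdp))
            return;

        shinfo = xdp_get_shared_info_from_buff(xdp);
        for (i = 0; i < shinfo->nr_frags; i++) {
            struct page *page = skb_frag_page(&shinfo->frags[i]);

            put_page(page);     /* a driver may recycle instead */
        }
        shinfo->nr_frags = 0;   /* prevent a second free */
    }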
|
/linux-6.12.1/arch/x86/kvm/ |
D | xen.c |
      76  struct shared_info *shinfo = gpc->khva;  in kvm_xen_shared_info_init() local
      78  wc_sec_hi = &shinfo->wc_sec_hi;  in kvm_xen_shared_info_init()
      79  wc = &shinfo->wc;  in kvm_xen_shared_info_init()
      83  struct compat_shared_info *shinfo = gpc->khva;  in kvm_xen_shared_info_init() local
      85  wc_sec_hi = &shinfo->arch.wc_sec_hi;  in kvm_xen_shared_info_init()
      86  wc = &shinfo->wc;  in kvm_xen_shared_info_init()
    1396  struct shared_info *shinfo = gpc->khva;  in wait_pending_event() local
    1397  pending_bits = (unsigned long *)&shinfo->evtchn_pending;  in wait_pending_event()
    1399  struct compat_shared_info *shinfo = gpc->khva;  in wait_pending_event() local
    1400  pending_bits = (unsigned long *)&shinfo->evtchn_pending;  in wait_pending_event()
    [all …]
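
Both xen.c excerpts follow one dispatch pattern: the same mapped guest page (gpc->khva) is interpreted as the native struct shared_info or the 32-bit struct compat_shared_info, and only the field offsets differ. A sketch of the wall-clock case, assuming the struct definitions from the Xen interface headers; compat_shared_info is KVM-private (arch/x86/kvm/xen.h), shown here only for shape:

    #include <xen/interface/xen.h>  /* struct shared_info, pvclock types */

    /* Sketch of the layout dispatch in kvm_xen_shared_info_init():
     * long_mode selects the guest ABI view of the shared page.
     */
    static void sketch_wallclock_ptrs(void *khva, bool long_mode,
                                      u32 **wc_sec_hi,
                                      struct pvclock_wall_clock **wc)
    {
        if (long_mode) {
            struct shared_info *shinfo = khva;

            *wc_sec_hi = &shinfo->wc_sec_hi;
            *wc = &shinfo->wc;
        } else {
            struct compat_shared_info *shinfo = khva;

            *wc_sec_hi = &shinfo->arch.wc_sec_hi;
            *wc = &shinfo->wc;
        }
    }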
|
/linux-6.12.1/drivers/net/wwan/t7xx/ |
D | t7xx_hif_dpmaif_tx.c |
    247  struct skb_shared_info *shinfo;  in t7xx_dpmaif_add_skb_to_ring() local
    261  shinfo = skb_shinfo(skb);  in t7xx_dpmaif_add_skb_to_ring()
    262  if (shinfo->frag_list)  in t7xx_dpmaif_add_skb_to_ring()
    265  payload_cnt = shinfo->nr_frags + 1;  in t7xx_dpmaif_add_skb_to_ring()
    290  skb_frag_t *frag = shinfo->frags + wr_cnt - 1;  in t7xx_dpmaif_add_skb_to_ring()
|
/linux-6.12.1/net/ipv4/ |
D | tcp_offload.c |
    391  struct skb_shared_info *shinfo;  in tcp_gro_complete() local
    400  shinfo = skb_shinfo(skb);  in tcp_gro_complete()
    401  shinfo->gso_segs = NAPI_GRO_CB(skb)->count;  in tcp_gro_complete()
    404  shinfo->gso_type |= SKB_GSO_TCP_ECN;  in tcp_gro_complete()
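
tcp_gro_complete() converts a GRO-merged skb back into GSO state: the number of packets merged (NAPI_GRO_CB(skb)->count) becomes gso_segs, and SKB_GSO_TCP_ECN records that ECN state was seen, so the packet can be re-segmented faithfully later. Sketch with hypothetical stand-ins for the values GRO tracked:

    #include <linux/skbuff.h>

    /* Sketch of the GRO-to-GSO handoff in tcp_gro_complete(): the
     * merged super-packet keeps enough metadata to be re-segmented.
     * segs_merged and saw_ecn stand in for NAPI_GRO_CB state.
     */
    static void sketch_gro_complete(struct sk_buff *skb, u16 segs_merged,
                                    bool saw_ecn)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        shinfo->gso_segs = segs_merged;
        if (saw_ecn)
            shinfo->gso_type |= SKB_GSO_TCP_ECN;
    }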
|
D | tcp_output.c |
    1560  struct skb_shared_info *shinfo = skb_shinfo(skb);  in tcp_fragment_tstamp() local
    1563  !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {  in tcp_fragment_tstamp()
    1565  u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;  in tcp_fragment_tstamp()
    1567  shinfo->tx_flags &= ~tsflags;  in tcp_fragment_tstamp()
    1569  swap(shinfo->tskey, shinfo2->tskey);  in tcp_fragment_tstamp()
    1695  struct skb_shared_info *shinfo;  in __pskb_trim_head() local
    1701  shinfo = skb_shinfo(skb);  in __pskb_trim_head()
    1702  for (i = 0; i < shinfo->nr_frags; i++) {  in __pskb_trim_head()
    1703  int size = skb_frag_size(&shinfo->frags[i]);  in __pskb_trim_head()
    1709  shinfo->frags[k] = shinfo->frags[i];  in __pskb_trim_head()
    [all …]
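
__pskb_trim_head() trims bytes from the front of the paged data: fully consumed fragments are dropped, survivors are compacted down into slot k, and the first survivor is advanced by the leftover amount. A simplified sketch of the compaction loop (the real function also fixes up the skb length fields and linear head):

    #include <linux/skbuff.h>
    #include <linux/skbuff_ref.h>

    /* Simplified sketch of the frag compaction in __pskb_trim_head():
     * drop whole fragments covered by the trim, slide the survivors
     * down, and shrink nr_frags.
     */
    static void sketch_trim_frags(struct sk_buff *skb, int len)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i, k = 0;
        int eat = len;

        for (i = 0; i < shinfo->nr_frags; i++) {
            int size = skb_frag_size(&shinfo->frags[i]);

            if (size <= eat) {
                skb_frag_unref(skb, i);     /* fully trimmed away */
                eat -= size;
            } else {
                shinfo->frags[k] = shinfo->frags[i];
                if (eat) {
                    skb_frag_off_add(&shinfo->frags[k], eat);
                    skb_frag_size_sub(&shinfo->frags[k], eat);
                    eat = 0;
                }
                k++;
            }
        }
        shinfo->nr_frags = k;
    }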
|
/linux-6.12.1/drivers/net/ethernet/meta/fbnic/ |
D | fbnic_txrx.c |
    623  struct skb_shared_info *shinfo;  in fbnic_add_rx_frag() local
    637  shinfo = xdp_get_shared_info_from_buff(&pkt->buff);  in fbnic_add_rx_frag()
    642  __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);  in fbnic_add_rx_frag()
    651  struct skb_shared_info *shinfo;  in fbnic_put_pkt_buff() local
    658  shinfo = xdp_get_shared_info_from_buff(&pkt->buff);  in fbnic_put_pkt_buff()
    662  page = skb_frag_page(&shinfo->frags[nr_frags]);  in fbnic_put_pkt_buff()
    674  struct skb_shared_info *shinfo;  in fbnic_build_skb() local
    696  shinfo = xdp_get_shared_info_from_buff(&pkt->buff);  in fbnic_build_skb()
    697  WARN_ON(skb_shinfo(skb) != shinfo);  in fbnic_build_skb()
    701  shinfo->nr_frags = nr_frags;  in fbnic_build_skb()
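
fbnic_build_skb() relies on the xdp_buff's tailroom shared info landing exactly where skb_shinfo() expects it once the skb is built over the same buffer (hence the WARN_ON), so fragments written during RX are adopted just by publishing nr_frags. Sketch of that adoption step (nr_frags_seen is a hypothetical count accumulated during RX):

    #include <linux/skbuff.h>
    #include <net/xdp.h>

    /* Sketch of the adoption step in fbnic_build_skb(): an skb built
     * over the xdp_buff's buffer shares one skb_shared_info with it,
     * so frags already written for the xdp_buff need only nr_frags set.
     */
    static void sketch_adopt_xdp_frags(struct sk_buff *skb,
                                       struct xdp_buff *xdp,
                                       unsigned int nr_frags_seen)
    {
        struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);

        WARN_ON(skb_shinfo(skb) != shinfo); /* same memory, two views */
        shinfo->nr_frags = nr_frags_seen;
    }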
|
/linux-6.12.1/drivers/net/ethernet/intel/idpf/ |
D | idpf_txrx.c |
    2221  const struct skb_shared_info *shinfo;  in idpf_tx_desc_count_required() local
    2229  shinfo = skb_shinfo(skb);  in idpf_tx_desc_count_required()
    2230  for (i = 0; i < shinfo->nr_frags; i++) {  in idpf_tx_desc_count_required()
    2233  size = skb_frag_size(&shinfo->frags[i]);  in idpf_tx_desc_count_required()
    2522  const struct skb_shared_info *shinfo;  in idpf_tso() local
    2543  shinfo = skb_shinfo(skb);  in idpf_tso()
    2561  switch (shinfo->gso_type & ~SKB_GSO_DODGY) {  in idpf_tso()
    2573  l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));  in idpf_tso()
    2580  off->mss = shinfo->gso_size;  in idpf_tso()
    2581  off->tso_segs = shinfo->gso_segs;  in idpf_tso()
    [all …]
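
idpf_tso() dispatches on gso_type with SKB_GSO_DODGY masked off and, for UDP segmentation offload, rewrites the template UDP header's length to one segment's worth before handing mss and segment count to the descriptor. Sketch of the UDP_L4 arm only (helper name hypothetical):

    #include <linux/skbuff.h>
    #include <linux/udp.h>

    /* Sketch of the SKB_GSO_UDP_L4 handling excerpted from idpf_tso():
     * each emitted datagram carries gso_size payload bytes, so the
     * template header's length field is rewritten accordingly.
     */
    static void sketch_udp_gso_fixup(struct sk_buff *skb)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        struct udphdr *uh = udp_hdr(skb);

        if ((shinfo->gso_type & ~SKB_GSO_DODGY) & SKB_GSO_UDP_L4)
            uh->len = htons(shinfo->gso_size + sizeof(struct udphdr));
    }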
|