
Searched refs:frag (Results 1 – 25 of 363) sorted by relevance


/linux-6.12.1/fs/jffs2/
read.c
161 struct jffs2_node_frag *frag; in jffs2_read_inode_range() local
167 frag = jffs2_lookup_node_frag(&f->fragtree, offset); in jffs2_read_inode_range()
177 if (unlikely(!frag || frag->ofs > offset || in jffs2_read_inode_range()
178 frag->ofs + frag->size <= offset)) { in jffs2_read_inode_range()
180 if (frag && frag->ofs > offset) { in jffs2_read_inode_range()
182 f->inocache->ino, frag->ofs, offset); in jffs2_read_inode_range()
183 holesize = min(holesize, frag->ofs - offset); in jffs2_read_inode_range()
191 } else if (unlikely(!frag->node)) { in jffs2_read_inode_range()
192 uint32_t holeend = min(end, frag->ofs + frag->size); in jffs2_read_inode_range()
194 offset, holeend, frag->ofs, in jffs2_read_inode_range()
[all …]
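
The read.c hits above all revolve around one test: whether the fragment returned by the tree lookup actually covers the requested offset, or whether the offset falls in a hole. A minimal standalone sketch of that coverage check, using only the ofs and size fields visible in the excerpt (the struct name is a hypothetical stand-in, not the kernel's jffs2_node_frag):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the fragment record: only the two fields the
 * excerpt actually tests. */
struct frag_extent {
    uint32_t ofs;   /* file offset the fragment starts at */
    uint32_t size;  /* number of bytes it covers */
};

/* Negation of the hole test at read.c:177-178: the offset is covered only
 * when a fragment exists, starts at or before the offset, and ends after it. */
static bool frag_covers(const struct frag_extent *frag, uint32_t offset)
{
    return frag && frag->ofs <= offset && frag->ofs + frag->size > offset;
}

int main(void)
{
    struct frag_extent f = { .ofs = 4096, .size = 1024 };

    printf("%d %d %d\n",
           frag_covers(&f, 4095),  /* 0: just before the fragment (hole) */
           frag_covers(&f, 4096),  /* 1: first covered byte */
           frag_covers(&f, 5120)); /* 0: one past the end (hole) */
    return 0;
}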
gc.c
500 struct jffs2_node_frag *frag; in jffs2_garbage_collect_live() local
534 for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { in jffs2_garbage_collect_live()
535 if (frag->node && frag->node->raw == raw) { in jffs2_garbage_collect_live()
536 fn = frag->node; in jffs2_garbage_collect_live()
537 end = frag->ofs + frag->size; in jffs2_garbage_collect_live()
539 start = frag->ofs; in jffs2_garbage_collect_live()
540 if (nrfrags == frag->node->frags) in jffs2_garbage_collect_live()
549 frag->node->raw = f->inocache->nodes; in jffs2_garbage_collect_live()
1018 struct jffs2_node_frag *frag; in jffs2_garbage_collect_hole() local
1083 frag = frag_last(&f->fragtree); in jffs2_garbage_collect_hole()
[all …]
nodelist.c
61 struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); in jffs2_truncate_fragtree() local
66 if (frag && frag->ofs != size) { in jffs2_truncate_fragtree()
67 if (frag->ofs+frag->size > size) { in jffs2_truncate_fragtree()
68 frag->size = size - frag->ofs; in jffs2_truncate_fragtree()
70 frag = frag_next(frag); in jffs2_truncate_fragtree()
72 while (frag && frag->ofs >= size) { in jffs2_truncate_fragtree()
73 struct jffs2_node_frag *next = frag_next(frag); in jffs2_truncate_fragtree()
75 frag_erase(frag, list); in jffs2_truncate_fragtree()
76 jffs2_obsolete_node_frag(c, frag); in jffs2_truncate_fragtree()
77 frag = next; in jffs2_truncate_fragtree()
[all …]
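
The nodelist.c hits show the truncation walk: clip the fragment that straddles the new size, then erase every fragment lying wholly past it. A sketch of that logic over a plain sorted, non-overlapping singly linked list (the kernel keeps the fragments in an rbtree; the list here is a hypothetical simplification):

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical list-based model: fragments sorted by ofs and non-overlapping,
 * as they are in the real fragtree. */
struct frag_node {
    uint32_t ofs, size;
    struct frag_node *next;
};

/* Same shape as jffs2_truncate_fragtree(): clip the fragment straddling the
 * new size, then free every fragment that now lies wholly beyond it. */
static void truncate_frags(struct frag_node **list, uint32_t new_size)
{
    struct frag_node **pp = list;

    /* skip fragments that end at or before the new size */
    while (*pp && (*pp)->ofs + (*pp)->size <= new_size)
        pp = &(*pp)->next;

    /* a fragment straddling new_size keeps only its leading part */
    if (*pp && (*pp)->ofs < new_size) {
        (*pp)->size = new_size - (*pp)->ofs;
        pp = &(*pp)->next;
    }

    /* everything from here on starts at or past new_size: drop it */
    while (*pp) {
        struct frag_node *dead = *pp;

        *pp = dead->next;
        free(dead);
    }
}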
/linux-6.12.1/fs/xfs/scrub/
refcount.c
115 struct xchk_refcnt_frag *frag; in xchk_refcountbt_rmap_check() local
145 frag = kmalloc(sizeof(struct xchk_refcnt_frag), in xchk_refcountbt_rmap_check()
147 if (!frag) in xchk_refcountbt_rmap_check()
149 memcpy(&frag->rm, rec, sizeof(frag->rm)); in xchk_refcountbt_rmap_check()
150 list_add_tail(&frag->list, &refchk->fragments); in xchk_refcountbt_rmap_check()
168 struct xchk_refcnt_frag *frag; in xchk_refcountbt_process_rmap_fragments() local
193 list_for_each_entry(frag, &refchk->fragments, list) { in xchk_refcountbt_process_rmap_fragments()
194 if (frag->rm.rm_startblock < bno) in xchk_refcountbt_process_rmap_fragments()
196 bno = frag->rm.rm_startblock; in xchk_refcountbt_process_rmap_fragments()
204 list_for_each_entry_safe(frag, n, &refchk->fragments, list) { in xchk_refcountbt_process_rmap_fragments()
[all …]
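
The scrub code follows a collect-then-process pattern: each rmap hit is copied into a small heap-allocated record, queued on a list, and the whole list is walked and torn down later. A kernel-flavoured sketch of that idiom, assuming a hypothetical record type in place of xchk_refcnt_frag and GFP_KERNEL in place of the scrub code's own allocation flags:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical record: the real xchk_refcnt_frag embeds a struct
 * xfs_rmap_irec; two plain fields stand in for it here. */
struct frag_rec {
    struct list_head list;
    u64 startblock;
    u64 blockcount;
};

/* Collect phase, as in xchk_refcountbt_rmap_check(): allocate one record per
 * hit and queue it on the caller's fragment list. */
static int collect_frag(struct list_head *fragments, u64 start, u64 len)
{
    struct frag_rec *frag;

    frag = kmalloc(sizeof(*frag), GFP_KERNEL);
    if (!frag)
        return -ENOMEM;
    frag->startblock = start;
    frag->blockcount = len;
    list_add_tail(&frag->list, fragments);
    return 0;
}

/* Process phase, as in xchk_refcountbt_process_rmap_fragments(): walk the
 * list, then tear it down with the _safe iterator so entries can be freed. */
static void process_and_free_frags(struct list_head *fragments)
{
    struct frag_rec *frag, *n;

    list_for_each_entry(frag, fragments, list) {
        /* examine frag->startblock / frag->blockcount here */
    }

    list_for_each_entry_safe(frag, n, fragments, list) {
        list_del(&frag->list);
        kfree(frag);
    }
}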
/linux-6.12.1/net/ieee802154/6lowpan/
tx.c
105 struct sk_buff *frag; in lowpan_alloc_frag() local
108 frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size, in lowpan_alloc_frag()
111 if (likely(frag)) { in lowpan_alloc_frag()
112 frag->dev = wdev; in lowpan_alloc_frag()
113 frag->priority = skb->priority; in lowpan_alloc_frag()
114 skb_reserve(frag, wdev->needed_headroom); in lowpan_alloc_frag()
115 skb_reset_network_header(frag); in lowpan_alloc_frag()
116 *mac_cb(frag) = *mac_cb(skb); in lowpan_alloc_frag()
119 skb_put_data(frag, skb_mac_header(skb), skb->mac_len); in lowpan_alloc_frag()
121 rc = wpan_dev_hard_header(frag, wdev, in lowpan_alloc_frag()
[all …]
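
lowpan_alloc_frag() sizes each fragment skb for the wireless device's headroom and tailroom, reserves the headroom, and copies the MAC header across from the skb being fragmented. A hedged sketch of that allocation path (error reporting and the wpan_dev_hard_header() step from the excerpt are omitted, and the function name is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of the lowpan_alloc_frag() shape: size the new skb for the device's
 * head/tailroom plus the payload, reserve the headroom, and copy the MAC
 * header over from the original skb. */
static struct sk_buff *alloc_frag_like(struct sk_buff *skb,
                                       struct net_device *wdev, int size)
{
    struct sk_buff *frag;

    frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
                     GFP_ATOMIC);
    if (unlikely(!frag))
        return NULL;

    frag->dev = wdev;
    frag->priority = skb->priority;
    skb_reserve(frag, wdev->needed_headroom);
    skb_reset_network_header(frag);
    skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
    return frag;
}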
/linux-6.12.1/fs/configfs/
file.c
58 struct configfs_fragment *frag = to_frag(file); in fill_read_buffer() local
66 down_read(&frag->frag_sem); in fill_read_buffer()
67 if (!frag->frag_dead) in fill_read_buffer()
69 up_read(&frag->frag_sem); in fill_read_buffer()
109 struct configfs_fragment *frag = to_frag(file); in configfs_bin_read_iter() local
125 down_read(&frag->frag_sem); in configfs_bin_read_iter()
126 if (!frag->frag_dead) in configfs_bin_read_iter()
130 up_read(&frag->frag_sem); in configfs_bin_read_iter()
150 down_read(&frag->frag_sem); in configfs_bin_read_iter()
151 if (!frag->frag_dead) in configfs_bin_read_iter()
[all …]
dir.c
172 void put_fragment(struct configfs_fragment *frag) in put_fragment() argument
174 if (frag && atomic_dec_and_test(&frag->frag_count)) in put_fragment()
175 kfree(frag); in put_fragment()
178 struct configfs_fragment *get_fragment(struct configfs_fragment *frag) in get_fragment() argument
180 if (likely(frag)) in get_fragment()
181 atomic_inc(&frag->frag_count); in get_fragment()
182 return frag; in get_fragment()
190 struct configfs_fragment *frag) in configfs_new_dirent() argument
209 sd->s_frag = get_fragment(frag); in configfs_new_dirent()
244 umode_t mode, int type, struct configfs_fragment *frag) in configfs_make_dirent() argument
[all …]
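
get_fragment()/put_fragment() are a plain atomic reference count around kfree(); the excerpt shows both bodies in full. Restated as a minimal sketch with a hypothetical struct name:

#include <linux/atomic.h>
#include <linux/slab.h>

/* Minimal restatement of get_fragment()/put_fragment(): an atomic_t
 * reference count guarding kfree(). The count is expected to be set to 1
 * when the fragment is first allocated, so the final put frees it. */
struct fragment {
    atomic_t count;
    /* ... payload ... */
};

static struct fragment *frag_get(struct fragment *frag)
{
    if (likely(frag))
        atomic_inc(&frag->count);
    return frag;
}

static void frag_put(struct fragment *frag)
{
    if (frag && atomic_dec_and_test(&frag->count))
        kfree(frag);
}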
/linux-6.12.1/lib/kunit/
string-stream.c
19 struct string_stream_fragment *frag; in alloc_string_stream_fragment() local
21 frag = kzalloc(sizeof(*frag), gfp); in alloc_string_stream_fragment()
22 if (!frag) in alloc_string_stream_fragment()
25 frag->fragment = kmalloc(len, gfp); in alloc_string_stream_fragment()
26 if (!frag->fragment) { in alloc_string_stream_fragment()
27 kfree(frag); in alloc_string_stream_fragment()
31 return frag; in alloc_string_stream_fragment()
34 static void string_stream_fragment_destroy(struct string_stream_fragment *frag) in string_stream_fragment_destroy() argument
36 list_del(&frag->node); in string_stream_fragment_destroy()
37 kfree(frag->fragment); in string_stream_fragment_destroy()
[all …]
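
alloc_string_stream_fragment() is the usual two-stage allocation with rollback: if the inner buffer allocation fails, the container is freed before returning, so the caller only ever sees a fully built object or a failure. A sketch of that shape, returning NULL on failure in this simplified version:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical copy of the fragment container: a list node plus the
 * separately allocated character buffer. */
struct ss_frag {
    struct list_head node;
    char *fragment;
};

/* Two-stage allocation with rollback, as in alloc_string_stream_fragment():
 * if the inner buffer fails, the container is freed before returning. */
static struct ss_frag *ss_frag_alloc(int len, gfp_t gfp)
{
    struct ss_frag *frag;

    frag = kzalloc(sizeof(*frag), gfp);
    if (!frag)
        return NULL;

    frag->fragment = kmalloc(len, gfp);
    if (!frag->fragment) {
        kfree(frag);
        return NULL;
    }
    return frag;
}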
/linux-6.12.1/drivers/misc/mei/
vsc-fw-loader.c
411 struct vsc_img_frag *frag; in vsc_identify_csi_image() local
428 frag = &fw_loader->frags[csi_image_map[i].frag_index]; in vsc_identify_csi_image()
430 frag->data = sign->image; in vsc_identify_csi_image()
431 frag->size = le32_to_cpu(sign->image_size); in vsc_identify_csi_image()
432 frag->location = le32_to_cpu(img->image_location[i]); in vsc_identify_csi_image()
433 frag->type = csi_image_map[i].image_type; in vsc_identify_csi_image()
491 struct vsc_img_frag *frag, *last_frag; in vsc_identify_ace_image() local
505 frag = &fw_loader->frags[frag_index]; in vsc_identify_ace_image()
507 frag->data = sign->image; in vsc_identify_ace_image()
508 frag->size = le32_to_cpu(sign->image_size); in vsc_identify_ace_image()
[all …]
/linux-6.12.1/net/rds/
ib_recv.c
160 struct rds_page_frag *frag; in rds_ib_recv_free_caches() local
179 list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) { in rds_ib_recv_free_caches()
180 list_del(&frag->f_cache_entry); in rds_ib_recv_free_caches()
181 WARN_ON(!list_empty(&frag->f_item)); in rds_ib_recv_free_caches()
182 kmem_cache_free(rds_ib_frag_slab, frag); in rds_ib_recv_free_caches()
194 struct rds_page_frag *frag) in rds_ib_frag_free() argument
196 rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg)); in rds_ib_frag_free()
198 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags); in rds_ib_frag_free()
207 struct rds_page_frag *frag; in rds_ib_inc_free() local
214 list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) { in rds_ib_inc_free()
[all …]
/linux-6.12.1/fs/ceph/
inode.c
262 struct ceph_inode_frag *frag; in __get_or_create_frag() local
268 frag = rb_entry(parent, struct ceph_inode_frag, node); in __get_or_create_frag()
269 c = ceph_frag_compare(f, frag->frag); in __get_or_create_frag()
275 return frag; in __get_or_create_frag()
278 frag = kmalloc(sizeof(*frag), GFP_NOFS); in __get_or_create_frag()
279 if (!frag) in __get_or_create_frag()
282 frag->frag = f; in __get_or_create_frag()
283 frag->split_by = 0; in __get_or_create_frag()
284 frag->mds = -1; in __get_or_create_frag()
285 frag->ndist = 0; in __get_or_create_frag()
[all …]
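
__get_or_create_frag() is a lookup-or-create walk over an rbtree keyed by the frag id: descend comparing keys, return an existing entry on a match, otherwise allocate a new node and link it at the slot where the walk stopped. A sketch of that idiom with a plain integer comparison standing in for ceph_frag_compare(); the rb_link_node()/rb_insert_color() tail is the standard rbtree insertion sequence, which the excerpt truncates before reaching:

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical entry type keyed by a bare u32 frag id. */
struct frag_entry {
    struct rb_node node;
    u32 frag;
};

/* Lookup-or-create walk in the style of __get_or_create_frag(). */
static struct frag_entry *frag_get_or_create(struct rb_root *root, u32 key)
{
    struct rb_node **p = &root->rb_node, *parent = NULL;
    struct frag_entry *cur, *new;

    while (*p) {
        parent = *p;
        cur = rb_entry(parent, struct frag_entry, node);
        if (key < cur->frag)
            p = &(*p)->rb_left;
        else if (key > cur->frag)
            p = &(*p)->rb_right;
        else
            return cur;            /* already present */
    }

    new = kmalloc(sizeof(*new), GFP_NOFS);
    if (!new)
        return NULL;
    new->frag = key;
    rb_link_node(&new->node, parent, p);
    rb_insert_color(&new->node, root);
    return new;
}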
/linux-6.12.1/fs/adfs/
map.c
76 u32 frag; in lookup_zone() local
78 frag = GET_FRAG_ID(map, 8, idmask & 0x7fff); in lookup_zone()
79 freelink = frag ? 8 + frag : 0; in lookup_zone()
82 frag = GET_FRAG_ID(map, start, idmask); in lookup_zone()
89 freelink += frag & 0x7fff; in lookup_zone()
90 } else if (frag == frag_id) { in lookup_zone()
104 frag, start, fragend); in lookup_zone()
123 u32 frag; in scan_free_map() local
129 frag = GET_FRAG_ID(map, start, idmask); in scan_free_map()
135 if (frag == 0) in scan_free_map()
[all …]
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/nfdk/
dp.c
123 const skb_frag_t *frag, *fend; in nfp_nfdk_tx_maybe_close_block() local
132 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx_maybe_close_block()
133 fend = frag + nr_frags; in nfp_nfdk_tx_maybe_close_block()
134 for (; frag < fend; frag++) in nfp_nfdk_tx_maybe_close_block()
135 n_descs += DIV_ROUND_UP(skb_frag_size(frag), in nfp_nfdk_tx_maybe_close_block()
260 const skb_frag_t *frag, *fend; in nfp_nfdk_tx() local
354 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx()
355 fend = frag + nr_frags; in nfp_nfdk_tx()
370 if (frag >= fend) in nfp_nfdk_tx()
373 dma_len = skb_frag_size(frag); in nfp_nfdk_tx()
[all …]
rings.c
17 const skb_frag_t *frag, *fend; in nfp_nfdk_tx_ring_reset() local
41 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx_ring_reset()
42 fend = frag + nr_frags; in nfp_nfdk_tx_ring_reset()
43 for (; frag < fend; frag++) { in nfp_nfdk_tx_ring_reset()
44 size = skb_frag_size(frag); in nfp_nfdk_tx_ring_reset()
46 skb_frag_size(frag), DMA_TO_DEVICE); in nfp_nfdk_tx_ring_reset()
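
Both nfdk files walk the skb's paged fragments with pointer arithmetic: frags[] is a contiguous array, so the half-open range [frag, fend) can be traversed like a slice. A small sketch of that traversal, here just summing the fragment sizes:

#include <linux/skbuff.h>

/* Walk [frag, fend) over skb_shared_info's frags[] exactly as the nfdk code
 * does; this version only sums the fragment sizes. */
static unsigned int sum_frag_sizes(struct sk_buff *skb)
{
    const skb_frag_t *frag = skb_shinfo(skb)->frags;
    const skb_frag_t *fend = frag + skb_shinfo(skb)->nr_frags;
    unsigned int total = 0;

    for (; frag < fend; frag++)
        total += skb_frag_size(frag);
    return total;
}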
/linux-6.12.1/net/tipc/
msg.c
127 struct sk_buff *frag = *buf; in tipc_buf_append() local
134 if (!frag) in tipc_buf_append()
137 msg = buf_msg(frag); in tipc_buf_append()
139 frag->next = NULL; in tipc_buf_append()
140 skb_pull(frag, msg_hdr_sz(msg)); in tipc_buf_append()
145 if (skb_has_frag_list(frag) && __skb_linearize(frag)) in tipc_buf_append()
148 frag = skb_unshare(frag, GFP_ATOMIC); in tipc_buf_append()
149 if (unlikely(!frag)) in tipc_buf_append()
151 head = *headbuf = frag; in tipc_buf_append()
164 if (skb_try_coalesce(head, frag, &headstolen, &delta)) { in tipc_buf_append()
[all …]
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/nfd3/
dp.c
264 const skb_frag_t *frag; in nfp_nfd3_tx() local
347 frag = &skb_shinfo(skb)->frags[f]; in nfp_nfd3_tx()
348 fsize = skb_frag_size(frag); in nfp_nfd3_tx()
350 dma_addr = skb_frag_dma_map(dp->dev, frag, 0, in nfp_nfd3_tx()
389 frag = &skb_shinfo(skb)->frags[f]; in nfp_nfd3_tx()
391 skb_frag_size(frag), DMA_TO_DEVICE); in nfp_nfd3_tx()
442 const skb_frag_t *frag; in nfp_nfd3_tx_complete() local
467 frag = &skb_shinfo(skb)->frags[fidx]; in nfp_nfd3_tx_complete()
469 skb_frag_size(frag), DMA_TO_DEVICE); in nfp_nfd3_tx_complete()
554 void *frag; in nfp_nfd3_napi_alloc_one() local
[all …]
/linux-6.12.1/include/net/
xdp_sock_drv.h
141 struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_add_frag() local
143 list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list); in xsk_buff_add_frag()
150 struct xdp_buff_xsk *frag; in xsk_buff_get_frag() local
152 frag = list_first_entry_or_null(&xskb->pool->xskb_list, in xsk_buff_get_frag()
154 if (frag) { in xsk_buff_get_frag()
155 list_del(&frag->xskb_list_node); in xsk_buff_get_frag()
156 ret = &frag->xdp; in xsk_buff_get_frag()
172 struct xdp_buff_xsk *frag; in xsk_buff_get_tail() local
174 frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk, in xsk_buff_get_tail()
176 return &frag->xdp; in xsk_buff_get_tail()
/linux-6.12.1/net/tls/
tls_strp.c
54 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in tls_strp_skb_copy() local
57 skb_frag_address(frag), in tls_strp_skb_copy()
58 skb_frag_size(frag))); in tls_strp_skb_copy()
59 offset += skb_frag_size(frag); in tls_strp_skb_copy()
215 skb_frag_t *frag; in tls_strp_copyin_frag() local
218 frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; in tls_strp_copyin_frag()
224 chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag)); in tls_strp_copyin_frag()
226 skb_frag_address(frag) + in tls_strp_copyin_frag()
227 skb_frag_size(frag), in tls_strp_copyin_frag()
232 skb_frag_size_add(frag, chunk); in tls_strp_copyin_frag()
[all …]
/linux-6.12.1/include/linux/
skbuff_ref.h
18 static inline void __skb_frag_ref(skb_frag_t *frag) in __skb_frag_ref() argument
20 get_page(skb_frag_page(frag)); in __skb_frag_ref()
54 static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) in __skb_frag_unref() argument
56 skb_page_unref(skb_frag_netmem(frag), recycle); in __skb_frag_unref()
/linux-6.12.1/net/core/
tso.c
54 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_build_data() local
57 tso->size = skb_frag_size(frag); in tso_build_data()
58 tso->data = skb_frag_address(frag); in tso_build_data()
80 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_start() local
83 tso->size = skb_frag_size(frag); in tso_start()
84 tso->data = skb_frag_address(frag); in tso_start()
/linux-6.12.1/drivers/infiniband/hw/hfi1/
ipoib_rx.c
26 void *frag; in prepare_frag_skb() local
30 frag = napi_alloc_frag(skb_size); in prepare_frag_skb()
32 if (unlikely(!frag)) in prepare_frag_skb()
35 skb = build_skb(frag, skb_size); in prepare_frag_skb()
38 skb_free_frag(frag); in prepare_frag_skb()
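
prepare_frag_skb() pairs napi_alloc_frag() with build_skb(): grab a page-fragment buffer of the requested size, wrap an sk_buff around it, and hand the fragment back with skb_free_frag() if the wrap fails. A sketch of that pairing (the function name and bare size parameter are simplifications of the excerpt):

#include <linux/skbuff.h>

/* napi_alloc_frag() provides the page fragment, build_skb() wraps an sk_buff
 * around it, and skb_free_frag() returns the fragment if the wrap fails. */
static struct sk_buff *rx_skb_from_frag(unsigned int skb_size)
{
    struct sk_buff *skb;
    void *frag;

    frag = napi_alloc_frag(skb_size);
    if (unlikely(!frag))
        return NULL;

    skb = build_skb(frag, skb_size);
    if (unlikely(!skb)) {
        skb_free_frag(frag);
        return NULL;
    }
    return skb;
}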
/linux-6.12.1/net/ipv6/
ip6_output.c
713 iter->frag = skb_shinfo(skb)->frag_list; in ip6_fraglist_init()
744 struct sk_buff *frag = iter->frag; in ip6_fraglist_prepare() local
748 frag->ip_summed = CHECKSUM_NONE; in ip6_fraglist_prepare()
749 skb_reset_transport_header(frag); in ip6_fraglist_prepare()
750 fh = __skb_push(frag, sizeof(struct frag_hdr)); in ip6_fraglist_prepare()
751 __skb_push(frag, hlen); in ip6_fraglist_prepare()
752 skb_reset_network_header(frag); in ip6_fraglist_prepare()
753 memcpy(skb_network_header(frag), iter->tmp_hdr, hlen); in ip6_fraglist_prepare()
758 if (frag->next) in ip6_fraglist_prepare()
761 ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); in ip6_fraglist_prepare()
[all …]
/linux-6.12.1/arch/powerpc/mm/book3s64/
mmu_context.c
263 void *frag; in destroy_pagetable_cache() local
265 frag = mm->context.pte_frag; in destroy_pagetable_cache()
266 if (frag) in destroy_pagetable_cache()
267 pte_frag_destroy(frag); in destroy_pagetable_cache()
269 frag = mm->context.pmd_frag; in destroy_pagetable_cache()
270 if (frag) in destroy_pagetable_cache()
271 pmd_frag_destroy(frag); in destroy_pagetable_cache()
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/
nfp_net_dp.c
19 void *frag; in nfp_net_rx_alloc_one() local
22 frag = netdev_alloc_frag(dp->fl_bufsz); in nfp_net_rx_alloc_one()
27 frag = page ? page_address(page) : NULL; in nfp_net_rx_alloc_one()
29 if (!frag) { in nfp_net_rx_alloc_one()
34 *dma_addr = nfp_net_dma_map_rx(dp, frag); in nfp_net_rx_alloc_one()
36 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_net_rx_alloc_one()
41 return frag; in nfp_net_rx_alloc_one()
145 if (!rx_ring->rxbufs[i].frag) in nfp_net_rx_ring_bufs_free()
149 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); in nfp_net_rx_ring_bufs_free()
151 rx_ring->rxbufs[i].frag = NULL; in nfp_net_rx_ring_bufs_free()
[all …]
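
nfp_net_rx_alloc_one() chooses between a page-fragment allocation and a whole page depending on how the datapath is configured (the excerpt ties the page case to dp->xdp_prog), then maps whichever buffer it got for DMA. A sketch of just the either/or allocation, with a hypothetical use_frag flag standing in for the driver's dp state and the DMA mapping step left out:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Either a page fragment from the per-CPU cache or a whole page, returned as
 * a plain pointer in both cases. */
static void *rx_buf_alloc(unsigned int bufsz, bool use_frag)
{
    struct page *page;

    if (use_frag)
        return netdev_alloc_frag(bufsz);

    page = alloc_page(GFP_KERNEL);
    return page ? page_address(page) : NULL;
}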
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
182 struct mlx5e_wqe_frag_info *frag; in mlx5e_xsk_alloc_rx_wqes_batched() local
188 frag = &rq->wqe.frags[j]; in mlx5e_xsk_alloc_rx_wqes_batched()
190 addr = xsk_buff_xdp_get_frame_dma(*frag->xskp); in mlx5e_xsk_alloc_rx_wqes_batched()
192 frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE); in mlx5e_xsk_alloc_rx_wqes_batched()
205 struct mlx5e_wqe_frag_info *frag; in mlx5e_xsk_alloc_rx_wqes() local
211 frag = &rq->wqe.frags[j]; in mlx5e_xsk_alloc_rx_wqes()
213 *frag->xskp = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_wqes()
214 if (unlikely(!*frag->xskp)) in mlx5e_xsk_alloc_rx_wqes()
217 addr = xsk_buff_xdp_get_frame_dma(*frag->xskp); in mlx5e_xsk_alloc_rx_wqes()
219 frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE); in mlx5e_xsk_alloc_rx_wqes()
