
Searched refs:chunk (Results 1 – 25 of 352) sorted by relevance


/linux-6.12.1/net/sctp/
inqueue.c
  in sctp_inq_chunk_free():
    42  static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
    44  if (chunk->head_skb)
    45  chunk->skb = chunk->head_skb;
    46  sctp_chunk_free(chunk);
  in sctp_inq_free():
    52  struct sctp_chunk *chunk, *tmp;
    55  list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
    56  list_del_init(&chunk->list);
    57  sctp_chunk_free(chunk);
  in sctp_inq_push():
    72  void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
    75  if (chunk->rcvr->dead) {
  [all …]
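
The sctp_inq_free() hits above show the usual kernel idiom for emptying a list while freeing its members: iterate with a spare cursor so the node being freed never has to be dereferenced again. A minimal userspace sketch of the same pattern, with hypothetical names (my_chunk, queue_free) rather than the SCTP API:

#include <stdlib.h>

struct my_chunk {
        struct my_chunk *next;      /* singly linked for simplicity */
        /* payload omitted */
};

/* Free every chunk on the list; "tmp" remembers the successor so the
 * current node can be released safely, just as list_for_each_entry_safe()
 * does for the kernel's doubly linked lists. */
static void queue_free(struct my_chunk *head)
{
        struct my_chunk *chunk, *tmp;

        for (chunk = head; chunk; chunk = tmp) {
                tmp = chunk->next;
                free(chunk);
        }
}
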
chunk.c
  in sctp_datamsg_free():
    60  struct sctp_chunk *chunk;
    65  list_for_each_entry(chunk, &msg->chunks, frag_list)
    66  sctp_chunk_free(chunk);
  in sctp_datamsg_destroy():
    76  struct sctp_chunk *chunk;
    83  chunk = list_entry(pos, struct sctp_chunk, frag_list);
    86  sctp_chunk_put(chunk);
    90  asoc = chunk->asoc;
    92  sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;
    96  ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
    104 ev = sctp_ulpevent_make_send_failed_event(asoc, chunk,
  [all …]
output.c
  declarations:
    46  struct sctp_chunk *chunk);
    48  struct sctp_chunk *chunk);
    50  struct sctp_chunk *chunk);
    52  struct sctp_chunk *chunk,
  in sctp_packet_config():
    122 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
    124 if (chunk)
    125 sctp_packet_append_chunk(packet, chunk);
  in sctp_packet_free():
    163 struct sctp_chunk *chunk, *tmp;
    167 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
    168 list_del_init(&chunk->list);
  [all …]
outqueue.c
  in __sctp_outq_teardown():
    210 struct sctp_chunk *chunk, *tmp;
    216 chunk = list_entry(lchunk, struct sctp_chunk,
    219 sctp_chunk_fail(chunk, q->error);
    220 sctp_chunk_free(chunk);
    227 chunk = list_entry(lchunk, struct sctp_chunk,
    229 sctp_chunk_fail(chunk, q->error);
    230 sctp_chunk_free(chunk);
    236 chunk = list_entry(lchunk, struct sctp_chunk,
    238 sctp_chunk_fail(chunk, q->error);
    239 sctp_chunk_free(chunk);
  [all …]
sm_statefuns.c
  declarations:
    55  struct sctp_chunk *chunk,
    58  struct sctp_chunk *chunk,
    63  const struct sctp_chunk *chunk);
    67  const struct sctp_chunk *chunk,
    98  static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
    150 struct sctp_chunk *chunk);
  in sctp_chunk_length_valid():
    173 static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
    176 __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
    179 if (unlikely(chunk->pdiscard))
  in sctp_err_chunk_valid():
    188 static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
  [all …]
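
sctp_chunk_length_valid() above reads the chunk header length with ntohs() before comparing it against the minimum required for that chunk type, since SCTP carries lengths in network byte order (the kernel version also bails out early for chunks already marked for discard via chunk->pdiscard). A rough standalone sketch of the length check only, with illustrative names rather than the kernel helpers:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

struct chunk_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;            /* big-endian on the wire */
};

/* Reject chunks whose declared length is shorter than the smallest
 * valid layout for this chunk type. */
static bool chunk_length_valid(const struct chunk_hdr *hdr,
                               uint16_t required_length)
{
        uint16_t chunk_length = ntohs(hdr->length);

        return chunk_length >= required_length;
}
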
sm_make_chunk.c
  declarations:
    67  static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
  in sctp_control_release_owner():
    73  struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
    75  if (chunk->shkey) {
    76  struct sctp_shared_key *shkey = chunk->shkey;
    77  struct sctp_association *asoc = chunk->asoc;
    93  sctp_auth_shkey_release(chunk->shkey);
  in sctp_control_set_owner_w():
    97  static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
    99  struct sctp_association *asoc = chunk->asoc;
    100 struct sk_buff *skb = chunk->skb;
    109 if (chunk->auth) {
  [all …]
/linux-6.12.1/net/sunrpc/xprtrdma/
svc_rdma_pcl.c
  in pcl_free():
    20  struct svc_rdma_chunk *chunk;
    22  chunk = pcl_first_chunk(pcl);
    23  list_del(&chunk->ch_list);
    24  kfree(chunk);
  in pcl_alloc_chunk():
    30  struct svc_rdma_chunk *chunk;
    32  chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL);
    33  if (!chunk)
    36  chunk->ch_position = position;
    37  chunk->ch_length = 0;
    38  chunk->ch_payload_length = 0;
  [all …]
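
pcl_alloc_chunk() above sizes its allocation with struct_size(), the kernel helper for a struct that ends in a flexible array of ch_segments. A userspace approximation of the same layout and allocation, without the kernel's overflow-checked arithmetic (all names here are made up):

#include <stdint.h>
#include <stdlib.h>

struct seg {
        uint64_t offset;
        uint32_t length;
};

struct my_chunk {
        size_t     position;
        size_t     length;
        size_t     segcount;
        struct seg segments[];      /* flexible array member */
};

/* One allocation covers the header plus segcount trailing segments. */
static struct my_chunk *chunk_alloc(size_t segcount)
{
        struct my_chunk *chunk;

        chunk = calloc(1, sizeof(*chunk) + segcount * sizeof(chunk->segments[0]));
        if (!chunk)
                return NULL;
        chunk->segcount = segcount;
        return chunk;
}
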
/linux-6.12.1/mm/
percpu-vm.c
  in pcpu_chunk_page():
    13  static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
    17  WARN_ON(chunk->immutable);
    19  return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
  in pcpu_free_pages():
    54  static void pcpu_free_pages(struct pcpu_chunk *chunk,
  in pcpu_alloc_pages():
    82  static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
  in pcpu_pre_unmap_flush():
    127 static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
    131 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
    132 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
  in pcpu_unmap_pages():
    153 static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
    163 page = pcpu_chunk_page(chunk, cpu, i);
  [all …]
percpu.c
  in pcpu_addr_in_chunk():
    215 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
    219 if (!chunk)
    222 start_addr = chunk->base_addr + chunk->start_offset;
    223 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
    224 chunk->end_offset;
  in pcpu_chunk_slot():
    242 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
    244 const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    246 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
  in pcpu_chunk_addr():
    275 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
    278 return (unsigned long)chunk->base_addr +
  [all …]
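
pcpu_addr_in_chunk() above treats a chunk as the half-open range [base_addr + start_offset, base_addr + nr_pages * PAGE_SIZE - end_offset). A small self-contained version of that range check, with an illustrative page size and a hypothetical struct in place of struct pcpu_chunk:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MY_PAGE_SIZE 4096           /* illustrative only */

struct my_chunk {
        void  *base_addr;
        size_t start_offset;        /* unusable bytes at the front */
        size_t end_offset;          /* unusable bytes at the back */
        int    nr_pages;
};

static bool addr_in_chunk(const struct my_chunk *chunk, const void *addr)
{
        uintptr_t start, end, a = (uintptr_t)addr;

        if (!chunk)
                return false;

        start = (uintptr_t)chunk->base_addr + chunk->start_offset;
        end   = (uintptr_t)chunk->base_addr +
                (uintptr_t)chunk->nr_pages * MY_PAGE_SIZE - chunk->end_offset;

        return a >= start && a < end;
}
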
percpu-km.c
  in pcpu_post_unmap_tlb_flush():
    35  static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
  in pcpu_populate_chunk():
    41  static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
  in pcpu_depopulate_chunk():
    47  static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
  in pcpu_create_chunk():
    56  struct pcpu_chunk *chunk;
    61  chunk = pcpu_alloc_chunk(gfp);
    62  if (!chunk)
    67  pcpu_free_chunk(chunk);
    72  pcpu_set_page_chunk(nth_page(pages, i), chunk);
    74  chunk->data = pages;
    75  chunk->base_addr = page_address(pages);
  [all …]
percpu-stats.c
  in find_max_nr_alloc():
    35  struct pcpu_chunk *chunk;
    40  list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list)
    41  max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);
  in chunk_map_stats():
    52  static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
    55  struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    69  last_alloc = find_last_bit(chunk->alloc_map,
    70  pcpu_chunk_map_bits(chunk) -
    71  chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1);
    72  last_alloc = test_bit(last_alloc, chunk->alloc_map) ?
    76  start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
  [all …]
/linux-6.12.1/drivers/s390/cio/
itcw.c
  in itcw_init():
    183 void *chunk;
    195 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
    196 if (IS_ERR(chunk))
    197 return chunk;
    198 itcw = chunk;
    211 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
    212 if (IS_ERR(chunk))
    213 return chunk;
    214 itcw->tcw = chunk;
    219 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
  [all …]
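
itcw_init() above repeatedly calls fit_chunk() to carve aligned sub-buffers out of one memory area (the ITCW itself with alignment 1, then 64-byte-aligned TCWs), checking each result with IS_ERR(). fit_chunk() itself is an s390 CIO helper; the stand-in below only mirrors the carving idea, returns NULL instead of an ERR_PTR, and assumes a power-of-two alignment:

#include <stddef.h>
#include <stdint.h>

/* Round the cursor up to "align", reserve "size" bytes, and advance. */
static void *fit(uintptr_t *cursor, uintptr_t end, size_t size, size_t align)
{
        uintptr_t start = (*cursor + align - 1) & ~((uintptr_t)align - 1);

        if (start + size > end)
                return NULL;        /* the kernel helper returns an ERR_PTR here */
        *cursor = start + size;
        return (void *)start;
}
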
/linux-6.12.1/kernel/trace/
pid_list.c
  in get_lower_chunk():
    14  union lower_chunk *chunk;
    21  chunk = pid_list->lower_list;
    22  pid_list->lower_list = chunk->next;
    25  chunk->next = NULL;
    33  return chunk;
  in get_upper_chunk():
    38  union upper_chunk *chunk;
    45  chunk = pid_list->upper_list;
    46  pid_list->upper_list = chunk->next;
    49  chunk->next = NULL;
    57  return chunk;
  [all …]
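
get_lower_chunk() and get_upper_chunk() above pop a preallocated chunk off a singly linked free list and clear its next pointer before handing it out. The same pop, sketched standalone with hypothetical types (the real unions also carry the bitmap/pointer payload and the functions take the pid_list itself):

#include <stddef.h>

union free_chunk {
        union free_chunk *next;     /* link while the chunk sits on the free list */
        unsigned long data[64];     /* illustrative payload */
};

static union free_chunk *get_free_chunk(union free_chunk **free_list)
{
        union free_chunk *chunk = *free_list;

        if (!chunk)
                return NULL;
        *free_list = chunk->next;
        chunk->next = NULL;         /* detach before reuse */
        return chunk;
}
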
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/
icm.c
  in mlx4_free_icm_pages():
    55  static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
    59  if (chunk->nsg > 0)
    60  dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
    63  for (i = 0; i < chunk->npages; ++i)
    64  __free_pages(sg_page(&chunk->sg[i]),
    65  get_order(chunk->sg[i].length));
  in mlx4_free_icm_coherent():
    68  static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
    72  for (i = 0; i < chunk->npages; ++i)
    74  chunk->buf[i].size,
    75  chunk->buf[i].addr,
  [all …]
icm.h
  struct member:
    74  struct mlx4_icm_chunk *chunk;
  in mlx4_icm_first():
    100 iter->chunk = list_empty(&icm->chunk_list) ?
  in mlx4_icm_last():
    108 return !iter->chunk;
  in mlx4_icm_next():
    113 if (++iter->page_idx >= iter->chunk->nsg) {
    114 if (iter->chunk->list.next == &iter->icm->chunk_list) {
    115 iter->chunk = NULL;
    119 iter->chunk = list_entry(iter->chunk->list.next,
  in mlx4_icm_addr():
    127 if (iter->chunk->coherent)
    128 return iter->chunk->buf[iter->page_idx].dma_addr;
    130 return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
  [all …]
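
The icm.h excerpt is a two-level iterator: mlx4_icm_next() advances the page index inside the current chunk and, when that runs out, steps to the next chunk in the chunk list, with a NULL chunk marking the end (which is what mlx4_icm_last() tests). A simplified sketch of that shape, using plain next pointers and hypothetical names instead of list_head:

#include <stdbool.h>
#include <stddef.h>

struct icm_chunk {
        struct icm_chunk *next;     /* NULL-terminated chain of chunks */
        int nsg;                    /* number of mapped segments in this chunk */
};

struct icm_iter {
        struct icm_chunk *chunk;
        int page_idx;
};

static bool icm_last(const struct icm_iter *iter)
{
        return !iter->chunk;
}

static void icm_next(struct icm_iter *iter)
{
        if (++iter->page_idx >= iter->chunk->nsg) {
                iter->chunk = iter->chunk->next;    /* NULL ends the walk */
                iter->page_idx = 0;
        }
}
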
/linux-6.12.1/kernel/
audit_tree.c
  struct member:
    42  struct audit_chunk *chunk;
  in free_chunk():
    128 static void free_chunk(struct audit_chunk *chunk)
    132 for (i = 0; i < chunk->count; i++) {
    133 if (chunk->owners[i].owner)
    134 put_tree(chunk->owners[i].owner);
    136 kfree(chunk);
  in audit_put_chunk():
    139 void audit_put_chunk(struct audit_chunk *chunk)
    141 if (atomic_long_dec_and_test(&chunk->refs))
    142 free_chunk(chunk);
  in __put_chunk():
    147 struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
  [all …]
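
audit_put_chunk() above is the put side of a reference count: drop one reference and free the chunk only when the count reaches zero. Roughly the same logic with C11 atomics and illustrative types (the kernel uses atomic_long_dec_and_test() and, as the __put_chunk() hit shows, defers the actual free through an RCU callback rather than calling free directly):

#include <stdatomic.h>
#include <stdlib.h>

struct my_chunk {
        atomic_long refs;
        /* owners[] and watch bookkeeping omitted */
};

static void my_put_chunk(struct my_chunk *chunk)
{
        /* fetch_sub returns the old value, so 1 means we dropped the last ref */
        if (atomic_fetch_sub(&chunk->refs, 1) == 1)
                free(chunk);
}
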
/linux-6.12.1/lib/
genalloc.c
  in chunk_size():
    40  static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
    42  return chunk->end_addr - chunk->start_addr + 1;
  in gen_pool_add_owner():
    187 struct gen_pool_chunk *chunk;
    192 chunk = vzalloc_node(nbytes, nid);
    193 if (unlikely(chunk == NULL))
    196 chunk->phys_addr = phys;
    197 chunk->start_addr = virt;
    198 chunk->end_addr = virt + size - 1;
    199 chunk->owner = owner;
    200 atomic_long_set(&chunk->avail, size);
  [all …]
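
chunk_size() above relies on end_addr being inclusive (gen_pool_add_owner() sets end_addr = virt + size - 1), which is why the size needs the "+ 1". A quick standalone check of that arithmetic, with a hypothetical struct in place of struct gen_pool_chunk:

#include <assert.h>
#include <stddef.h>

struct gen_chunk {
        unsigned long start_addr;
        unsigned long end_addr;     /* inclusive last byte address */
};

static size_t chunk_size(const struct gen_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

int main(void)
{
        /* e.g. a chunk added with virt = 0x1000 and size = 0x1000 */
        struct gen_chunk c = {
                .start_addr = 0x1000,
                .end_addr   = 0x1000 + 0x1000 - 1,
        };

        assert(chunk_size(&c) == 0x1000);
        return 0;
}
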
/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring_mux.c
  in amdgpu_mux_resubmit_chunks():
    79  struct amdgpu_mux_chunk *chunk;
    103 list_for_each_entry(chunk, &e->list, entry) {
    104 if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
    106 chunk->sync_seq,
    108 if (chunk->sync_seq ==
    110 if (chunk->cntl_offset <= e->ring->buf_mask)
    112 chunk->cntl_offset);
    113 if (chunk->ce_offset <= e->ring->buf_mask)
    114 amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
    115 if (chunk->de_offset <= e->ring->buf_mask)
  [all …]
/linux-6.12.1/drivers/gpu/drm/nouveau/
nouveau_dmem.c
  in page_to_drm():
    96  struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
    98  return chunk->drm;
  in nouveau_dmem_page_addr():
    103 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
    105 chunk->pagemap.range.start;
    107 return chunk->bo->offset + off;
  in nouveau_dmem_page_free():
    112 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
    113 struct nouveau_dmem *dmem = chunk->drm->dmem;
    119 WARN_ON(!chunk->callocated);
    120 chunk->callocated--;
  in nouveau_dmem_chunk_alloc():
    229 struct nouveau_dmem_chunk *chunk;
  [all …]
/linux-6.12.1/drivers/infiniband/hw/irdma/
pble.c
  in irdma_destroy_pble_prm():
    18  struct irdma_chunk *chunk;
    22  chunk = (struct irdma_chunk *) pinfo->clist.next;
    23  list_del(&chunk->list);
    24  if (chunk->type == PBLE_SD_PAGED)
    25  irdma_pble_free_paged_mem(chunk);
    26  bitmap_free(chunk->bitmapbuf);
    27  kfree(chunk->chunkmem.va);
  in add_sd_direct():
    90  struct irdma_chunk *chunk = info->chunk;
    103 chunk->type = PBLE_SD_CONTIGOUS;
    107 chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
  [all …]
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
  in mlx5dr_icm_pool_get_chunk_mr_addr():
    80  u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
    82  u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
    84  return (u64)offset * chunk->seg;
  in mlx5dr_icm_pool_get_chunk_rkey():
    87  u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
    89  return chunk->buddy_mem->icm_mr->mkey;
  in mlx5dr_icm_pool_get_chunk_icm_addr():
    92  u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
    94  u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
    96  return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
  in mlx5dr_icm_pool_get_chunk_byte_size():
    99  u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
    101 return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
  [all …]
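
The dr_icm_pool.c helpers above locate a chunk purely by arithmetic: the per-entry size for the pool's ICM type times the chunk's segment index, plus (for the ICM address) the memory region's start address. A standalone sketch of that computation with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Address of segment "seg" inside an ICM region, given the entry size. */
static uint64_t chunk_icm_addr(uint64_t icm_start_addr, uint32_t entry_size,
                               uint32_t seg)
{
        return icm_start_addr + (uint64_t)entry_size * seg;
}

int main(void)
{
        /* e.g. 64-byte entries: segment 10 lives 640 bytes into the region */
        printf("0x%llx\n",
               (unsigned long long)chunk_icm_addr(0x100000, 64, 10));
        return 0;
}
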
/linux-6.12.1/drivers/infiniband/hw/mthca/
mthca_memfree.c
  in mthca_free_icm_pages():
    64  static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
    68  if (chunk->nsg > 0)
    69  dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
    72  for (i = 0; i < chunk->npages; ++i)
    73  __free_pages(sg_page(&chunk->mem[i]),
    74  get_order(chunk->mem[i].length));
  in mthca_free_icm_coherent():
    77  static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
    81  for (i = 0; i < chunk->npages; ++i) {
    82  dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
    83  lowmem_page_address(sg_page(&chunk->mem[i])),
  [all …]
/linux-6.12.1/include/net/sctp/
sm.h
  declarations:
    174 const struct sctp_chunk *chunk,
    177 const struct sctp_chunk *chunk);
    179 const struct sctp_chunk *chunk);
    182 const struct sctp_chunk *chunk);
    195 const struct sctp_chunk *chunk);
    197 const struct sctp_chunk *chunk);
    200 const struct sctp_chunk *chunk);
    201 int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
    203 const struct sctp_chunk *chunk,
    206 const struct sctp_chunk *chunk,
  [all …]
/linux-6.12.1/drivers/gpu/drm/panthor/
panthor_heap.c
  in panthor_free_heap_chunk():
    123 struct panthor_heap_chunk *chunk)
    126 list_del(&chunk->node);
    130 panthor_kernel_bo_destroy(chunk->bo);
    131 kfree(chunk);
  in panthor_alloc_heap_chunk():
    139 struct panthor_heap_chunk *chunk;
    143 chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);
    144 if (!chunk)
    147 chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
    151 if (IS_ERR(chunk->bo)) {
    152 ret = PTR_ERR(chunk->bo);
  [all …]
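
panthor_alloc_heap_chunk() above follows the common kernel error-unwind shape: allocate the chunk, then its buffer object, and on any failure release whatever was already set up before returning. A userspace sketch of that shape only (all names hypothetical; the real code allocates a GPU buffer object with panthor_kernel_bo_create() and propagates an errno rather than NULL):

#include <stdlib.h>

struct buffer { void *data; };

struct heap_chunk {
        struct buffer *bo;
};

static struct heap_chunk *alloc_heap_chunk(size_t chunk_size)
{
        struct heap_chunk *chunk;

        chunk = malloc(sizeof(*chunk));
        if (!chunk)
                return NULL;

        chunk->bo = malloc(sizeof(*chunk->bo));
        if (!chunk->bo)
                goto err_free_chunk;

        chunk->bo->data = malloc(chunk_size);
        if (!chunk->bo->data)
                goto err_free_bo;

        return chunk;

err_free_bo:
        free(chunk->bo);
err_free_chunk:
        free(chunk);
        return NULL;
}
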
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
mlx5hws_pool.c
  in hws_pool_buddy_db_put_chunk():
    128 struct mlx5hws_pool_chunk *chunk)
    132 buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
    134 mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
    138 mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
  in hws_pool_buddy_db_get_chunk():
    225 struct mlx5hws_pool_chunk *chunk)
    230 ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
    231 &chunk->resource_idx,
    232 &chunk->offset);
    235 chunk->order);
  in hws_pool_general_element_db_get_chunk():
    408 struct mlx5hws_pool_chunk *chunk)
  [all …]
