/linux-6.12.1/scripts/atomic/ |
D | gen-atomic-fallback.sh | 16 local order="$1"; shift 35 local order="$1"; shift 37 local tmpl_order=${order#_} 39 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" 49 local order="$1"; shift 51 local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" 52 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" 62 local order="$1"; shift 66 local atomicname="${atomic}_${pfx}${name}${sfx}${order}" 69 local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" [all …]
|
/linux-6.12.1/arch/arm64/kvm/hyp/nvhe/ |
D | page_alloc.c | 35 unsigned short order) in __find_buddy_nocheck() argument 39 addr ^= (PAGE_SIZE << order); in __find_buddy_nocheck() 54 unsigned short order) in __find_buddy_avail() argument 56 struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); in __find_buddy_avail() 58 if (!buddy || buddy->order != order || buddy->refcount) in __find_buddy_avail() 97 unsigned short order = p->order; in __hyp_attach_page() local 100 memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order); in __hyp_attach_page() 112 p->order = HYP_NO_ORDER; in __hyp_attach_page() 113 for (; (order + 1) <= pool->max_order; order++) { in __hyp_attach_page() 114 buddy = __find_buddy_avail(pool, p, order); in __hyp_attach_page() [all …]
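The hyp buddy allocator above finds a block's buddy by XOR-ing the block address with the block size (addr ^= PAGE_SIZE << order) and then merges upward while the buddy is free and of the same order. A minimal userspace sketch of that address arithmetic, using a hypothetical address rather than the hyp allocator's real structures:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Buddy of an order-sized block: flip the single address bit that
 * corresponds to the block size, as __find_buddy_nocheck() does. */
static unsigned long buddy_of(unsigned long addr, unsigned int order)
{
        return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
        unsigned long addr = 0x40002000UL;      /* hypothetical, page aligned */

        for (unsigned int order = 0; order < 3; order++) {
                unsigned long block = addr & ~((PAGE_SIZE << order) - 1);
                printf("order %u: block 0x%lx <-> buddy 0x%lx\n",
                       order, block, buddy_of(block, order));
        }
        return 0;
}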
|
/linux-6.12.1/include/trace/events/ |
D | compaction.h | 168 int order, 172 TP_ARGS(order, gfp_mask, prio), 175 __field(int, order) 181 __entry->order = order; 187 __entry->order, 195 int order, 198 TP_ARGS(zone, order, ret), 203 __field(int, order) 210 __entry->order = order; 217 __entry->order, [all …]
|
D | vmscan.h | 68 TP_PROTO(int nid, int zid, int order), 70 TP_ARGS(nid, zid, order), 75 __field( int, order ) 81 __entry->order = order; 86 __entry->order) 91 TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags), 93 TP_ARGS(nid, zid, order, gfp_flags), 98 __field( int, order ) 105 __entry->order = order; 111 __entry->order, [all …]
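Both trace headers above follow the same TRACE_EVENT() shape: TP_PROTO()/TP_ARGS() declare the probe signature, __field() reserves space in the ring-buffer record, TP_fast_assign() copies the arguments (order among them) into that record, and TP_printk() formats it when the trace is read. A hedged skeleton with a made-up event name, omitting the surrounding trace-header boilerplate:

/* Hypothetical event, not one defined in the headers above. */
TRACE_EVENT(mm_demo_reclaim_wake,

        TP_PROTO(int nid, int order),

        TP_ARGS(nid, order),

        TP_STRUCT__entry(
                __field(int, nid)
                __field(int, order)
        ),

        TP_fast_assign(
                __entry->nid = nid;
                __entry->order = order;
        ),

        TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
);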
|
/linux-6.12.1/mm/ |
D | page_alloc.c | 214 static void __free_pages_ok(struct page *page, unsigned int order, 288 static bool page_contains_unaccepted(struct page *page, unsigned int order); 289 static bool cond_accept_memory(struct zone *zone, unsigned int order); 314 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument 316 return deferred_grow_zone(zone, order); in _deferred_grow_zone() 324 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument 509 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument 514 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex() 515 VM_BUG_ON(order != HPAGE_PMD_ORDER); in order_to_pindex() 522 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex() [all …]
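order_to_pindex() (line 509) flattens a (migratetype, order) pair into a single per-cpu free-list index, with orders above PAGE_ALLOC_COSTLY_ORDER special-cased for THP. A toy userspace model of that flattening, using made-up constants and leaving out the THP special case:

#include <assert.h>
#include <stdio.h>

#define NR_MIGRATETYPES 3       /* hypothetical: unmovable, movable, reclaimable */
#define COSTLY_ORDER    3       /* hypothetical per-cpu caching cut-off */

static unsigned int order_to_index(int migratetype, int order)
{
        assert(order <= COSTLY_ORDER);
        return order * NR_MIGRATETYPES + migratetype;
}

int main(void)
{
        for (int order = 0; order <= COSTLY_ORDER; order++)
                for (int mt = 0; mt < NR_MIGRATETYPES; mt++)
                        printf("order=%d mt=%d -> list %u\n",
                               order, mt, order_to_index(mt, order));
        return 0;
}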
|
D | compaction.c | 51 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument 53 return order == -1; in is_via_compact_memory() 59 static inline bool is_via_compact_memory(int order) { return false; } in is_via_compact_memory() argument 67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument 68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument 83 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) in mark_allocated_noprof() argument 85 post_alloc_hook(page, order, __GFP_MOVABLE); in mark_allocated_noprof() 92 int order; in release_free_list() local 95 for (order = 0; order < NR_PAGE_ORDERS; order++) { in release_free_list() 98 list_for_each_entry_safe(page, next, &freepages[order], lru) { in release_free_list() [all …]
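block_start_pfn()/block_end_pfn() (lines 67-68) clamp an arbitrary pfn to the order-sized block that contains it. A self-contained sketch with userspace stand-ins for round_down() and ALIGN():

#include <stdio.h>

/* Userspace equivalents of the kernel helpers; both require a power-of-two size. */
#define round_down(x, y)  ((x) & ~((y) - 1))
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))

#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)   ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
        unsigned long pfn = 0x12345;    /* hypothetical page frame number */
        unsigned int order = 9;         /* a 2MB pageblock with 4K pages */

        printf("pfn 0x%lx: block [0x%lx, 0x%lx)\n", pfn,
               block_start_pfn(pfn, order), block_end_pfn(pfn, order));
        return 0;
}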
|
/linux-6.12.1/drivers/iommu/ |
D | iommu-pages.h | 28 static inline void __iommu_alloc_account(struct page *page, int order) in __iommu_alloc_account() argument 30 const long pgcnt = 1l << order; in __iommu_alloc_account() 41 static inline void __iommu_free_account(struct page *page, int order) in __iommu_free_account() argument 43 const long pgcnt = 1l << order; in __iommu_free_account() 56 static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order) in __iommu_alloc_pages() argument 60 page = alloc_pages(gfp | __GFP_ZERO, order); in __iommu_alloc_pages() 64 __iommu_alloc_account(page, order); in __iommu_alloc_pages() 74 static inline void __iommu_free_pages(struct page *page, int order) in __iommu_free_pages() argument 79 __iommu_free_account(page, order); in __iommu_free_pages() 80 __free_pages(page, order); in __iommu_free_pages() [all …]
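__iommu_alloc_pages() pairs alloc_pages() with accounting of the 1 << order pages it hands out, and __iommu_free_pages() undoes both. A hedged usage sketch of those helpers; the caller, error handling, and surrounding driver context are assumed:

struct page *p;

p = __iommu_alloc_pages(GFP_KERNEL, 1);         /* two zeroed, accounted pages */
if (!p)
        return -ENOMEM;
/* ... use page_address(p) as an IOMMU page-table chunk ... */
__iommu_free_pages(p, 1);                       /* un-account and free both pages */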
|
/linux-6.12.1/lib/ |
D | test_xarray.c | 72 unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument 74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order() 177 unsigned int order; in check_xa_mark_1() local 207 for (order = 2; order < max_order; order++) { in check_xa_mark_1() 208 unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1() 209 unsigned long next = base + (1UL << order); in check_xa_mark_1() 217 xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1() 328 unsigned int order; in check_xa_shrink() local 353 for (order = 0; order < max_order; order++) { in check_xa_shrink() 354 unsigned long max = (1UL << order) - 1; in check_xa_shrink() [all …]
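xa_store_order() (line 72) is the test's helper for multi-index stores: XA_STATE_ORDER() sets up an xa_state spanning 2^order consecutive indices, and xas_store() installs a single entry over that whole range (this relies on CONFIG_XARRAY_MULTI). A hedged sketch of the same pattern outside the test harness:

/* Store one entry covering indices [index, index + (1 << order)). */
static void *store_multi_order(struct xarray *xa, unsigned long index,
                               unsigned int order, void *entry, gfp_t gfp)
{
        XA_STATE_ORDER(xas, xa, index, order);
        void *curr;

        do {
                xas_lock(&xas);
                curr = xas_store(&xas, entry);
                xas_unlock(&xas);
        } while (xas_nomem(&xas, gfp));

        return curr;
}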
|
/linux-6.12.1/drivers/gpu/drm/ttm/ |
D | ttm_pool.c | 81 unsigned int order) in ttm_pool_alloc_page() argument 92 if (order) in ttm_pool_alloc_page() 97 p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page() 99 p->private = order; in ttm_pool_alloc_page() 107 if (order) in ttm_pool_alloc_page() 110 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page() 123 dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page() 134 unsigned int order, struct page *p) in ttm_pool_free_page() argument 145 set_pages_wb(p, 1 << order); in ttm_pool_free_page() 149 __free_pages(p, order); in ttm_pool_free_page() [all …]
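ttm_pool_alloc_page() stashes the allocation order in the low bits of the DMA virtual address (dma->vaddr = (unsigned long)vaddr | order), which works because dma_alloc_attrs() returns at least page-aligned memory. A userspace analogy of that packing, with aligned_alloc() standing in for the DMA allocator:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned int order = 3;
        void *vaddr;

        /* Like the DMA allocator, aligned_alloc() returns an address whose
         * low PAGE_SHIFT bits are zero, so they are free to carry the order. */
        vaddr = aligned_alloc(PAGE_SIZE, PAGE_SIZE << order);
        if (!vaddr)
                return 1;

        unsigned long packed = (unsigned long)vaddr | order;

        /* Recover both halves later, as the pool does on its free path. */
        assert((void *)(packed & ~(PAGE_SIZE - 1)) == vaddr);
        assert((packed & (PAGE_SIZE - 1)) == order);
        printf("vaddr %p, order %lu packed into 0x%lx\n",
               vaddr, packed & (PAGE_SIZE - 1), packed);
        free(vaddr);
        return 0;
}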
|
/linux-6.12.1/tools/testing/radix-tree/ |
D | multiorder.c | 16 unsigned order) in item_insert_order() argument 18 XA_STATE_ORDER(xas, xa, index, order); in item_insert_order() 19 struct item *item = item_create(index, order); in item_insert_order() 42 int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7}; in multiorder_iteration() local 47 err = item_insert_order(xa, index[i], order[i]); in multiorder_iteration() 53 if (j <= (index[i] | ((1 << order[i]) - 1))) in multiorder_iteration() 58 int height = order[i] / XA_CHUNK_SHIFT; in multiorder_iteration() 60 unsigned long mask = (1UL << order[i]) - 1; in multiorder_iteration() 66 assert(item->order == order[i]); in multiorder_iteration() 82 int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7}; in multiorder_tagged_iteration() local [all …]
|
/linux-6.12.1/include/linux/ |
D | gfp.h | 199 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument 202 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument 205 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 209 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 264 __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof() argument 269 return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof() 275 struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) in __folio_alloc_node_noprof() argument 280 return __folio_alloc_noprof(gfp, order, nid, NULL); in __folio_alloc_node_noprof() 291 unsigned int order) in alloc_pages_node_noprof() argument 296 return __alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_node_noprof() [all …]
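alloc_pages_node() and the other wrappers here all funnel into __alloc_pages_noprof() with an order argument: the request is for a physically contiguous block of 2^order pages. A hedged usage fragment, assuming a hypothetical caller that wants a zeroed, node-local, order-2 buffer:

struct page *page;

page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 2);  /* 4 contiguous pages */
if (!page)
        return -ENOMEM;
/* ... 4 * PAGE_SIZE bytes available at page_address(page) ... */
__free_pages(page, 2);          /* order must match the allocation */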
|
D | compaction.h | 65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument 80 return 2UL << order; in compact_gap() 85 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); 86 extern int fragmentation_index(struct zone *zone, unsigned int order); 88 unsigned int order, unsigned int alloc_flags, 92 extern bool compaction_suitable(struct zone *zone, int order, 95 extern void compaction_defer_reset(struct zone *zone, int order, 98 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 103 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); 110 static inline bool compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument [all …]
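compact_gap() (line 65) sizes the free-memory headroom compaction wants above the watermark: 2UL << order, i.e. roughly twice the request, so there is room both for the allocation itself and for migration target pages while blocks are being moved. A one-file sketch of the resulting numbers:

#include <stdio.h>

/* Mirror of compact_gap(): keep about twice the request free. */
static unsigned long compact_gap(unsigned int order)
{
        return 2UL << order;
}

int main(void)
{
        for (unsigned int order = 0; order <= 10; order++)
                printf("order %2u -> gap of %4lu pages\n", order, compact_gap(order));
        return 0;
}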
|
/linux-6.12.1/drivers/gpu/drm/lib/ |
D | drm_random.c | 16 void drm_random_reorder(unsigned int *order, unsigned int count, in drm_random_reorder() argument 24 swap(order[i], order[j]); in drm_random_reorder() 31 unsigned int *order, i; in drm_random_order() local 33 order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in drm_random_order() 34 if (!order) in drm_random_order() 35 return order; in drm_random_order() 38 order[i] = i; in drm_random_order() 40 drm_random_reorder(order, count, state); in drm_random_order() 41 return order; in drm_random_order()
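drm_random_order() builds an identity permutation of [0, count) and then shuffles it, so callers can visit test objects exactly once in random order. A self-contained userspace analogue, using a plain Fisher-Yates shuffle in place of the DRM prandom state:

#include <stdio.h>
#include <stdlib.h>

static unsigned int *random_order(unsigned int count)
{
        unsigned int *order, i, j, tmp;

        order = malloc(count * sizeof(*order));
        if (!order)
                return NULL;

        for (i = 0; i < count; i++)
                order[i] = i;                   /* identity permutation */

        for (i = count - 1; i > 0; i--) {       /* Fisher-Yates shuffle */
                j = rand() % (i + 1);
                tmp = order[i]; order[i] = order[j]; order[j] = tmp;
        }
        return order;
}

int main(void)
{
        unsigned int *order = random_order(8);

        if (!order)
                return 1;
        for (unsigned int i = 0; i < 8; i++)
                printf("%u ", order[i]);
        printf("\n");
        free(order);
        return 0;
}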
|
/linux-6.12.1/drivers/media/pci/cx18/ |
D | cx18-mailbox.c | 231 static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) in epu_dma_done() argument 240 mb = &order->mb; in epu_dma_done() 247 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? in epu_dma_done() 253 mdl_ack = order->mdl_ack; in epu_dma_done() 277 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && in epu_dma_done() 324 static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order) in epu_debug() argument 327 char *str = order->str; in epu_debug() 329 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); in epu_debug() 335 static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order) in epu_cmd() argument 337 switch (order->rpu) { in epu_cmd() [all …]
|
/linux-6.12.1/mm/kmsan/ |
D | init.c | 117 bool kmsan_memblock_free_pages(struct page *page, unsigned int order) in kmsan_memblock_free_pages() argument 121 if (!held_back[order].shadow) { in kmsan_memblock_free_pages() 122 held_back[order].shadow = page; in kmsan_memblock_free_pages() 125 if (!held_back[order].origin) { in kmsan_memblock_free_pages() 126 held_back[order].origin = page; in kmsan_memblock_free_pages() 129 shadow = held_back[order].shadow; in kmsan_memblock_free_pages() 130 origin = held_back[order].origin; in kmsan_memblock_free_pages() 131 kmsan_setup_meta(page, shadow, origin, order); in kmsan_memblock_free_pages() 133 held_back[order].shadow = NULL; in kmsan_memblock_free_pages() 134 held_back[order].origin = NULL; in kmsan_memblock_free_pages() [all …]
|
/linux-6.12.1/arch/riscv/kvm/ |
D | tlb.c | 22 unsigned long order) in kvm_riscv_local_hfence_gvma_vmid_gpa() argument 26 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_vmid_gpa() 33 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa() 38 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa() 50 unsigned long order) in kvm_riscv_local_hfence_gvma_gpa() argument 54 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_gpa() 61 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa() 66 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa() 81 unsigned long order) in kvm_riscv_local_hfence_vvma_asid_gva() argument 85 if (PTRS_PER_PTE < (gvsz >> order)) { in kvm_riscv_local_hfence_vvma_asid_gva() [all …]
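Each of these local-flush helpers makes the same decision: if the span divided by the stride (gpsz >> order) exceeds PTRS_PER_PTE, flushing entry by entry is pointless and the whole context is flushed instead; otherwise the range is walked in BIT(order)-byte steps. A compact userspace sketch of that pattern:

#include <stdio.h>

#define PTRS_PER_PTE 512UL
#define BIT(n) (1UL << (n))

static void flush_range(unsigned long gpa, unsigned long gpsz, unsigned long order)
{
        unsigned long pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                printf("range too large, flush everything\n");
                return;
        }

        for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                printf("flush at 0x%lx (stride 0x%lx)\n", pos, BIT(order));
}

int main(void)
{
        flush_range(0x80000000UL, 0x4000UL, 12);        /* four 4K flushes */
        return 0;
}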
|
/linux-6.12.1/Documentation/trace/postprocess/ |
D | trace-vmscan-postprocess.pl | 315 my $order = $1; 316 $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN_PERORDER}[$order]++; 317 $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER} = $order; 326 my $order = $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER}; 328 $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency"; 339 my $order = $2; 340 $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER} = $order; 345 $perprocesspid{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE_PERORDER}[$order]++; 348 $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP_PERORDER}[$order]++; 358 my $order = $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER}; [all …]
|
/linux-6.12.1/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ |
D | gk104.c | 34 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_enable() local 38 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable() 39 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_enable() 42 nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500); in gk104_clkgate_enable() 50 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable() 51 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_enable() 54 nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045); in gk104_clkgate_enable() 63 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_fini() local 67 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_fini() 68 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_fini() [all …]
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_buddy.c | 75 unsigned int *order) in dr_buddy_find_free_seg() argument 99 *order = order_iter; in dr_buddy_find_free_seg() 120 unsigned int order, in mlx5dr_buddy_alloc_mem() argument 126 err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter); in mlx5dr_buddy_alloc_mem() 137 while (order_iter > order) { in mlx5dr_buddy_alloc_mem() 144 seg <<= order; in mlx5dr_buddy_alloc_mem() 151 unsigned int seg, unsigned int order) in mlx5dr_buddy_free_mem() argument 153 seg >>= order; in mlx5dr_buddy_free_mem() 158 while (test_bit(seg ^ 1, buddy->bitmap[order])) { in mlx5dr_buddy_free_mem() 159 bitmap_clear(buddy->bitmap[order], seg ^ 1, 1); in mlx5dr_buddy_free_mem() [all …]
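mlx5dr_buddy_free_mem() (line 151) merges upward on free: with one free-bitmap per order, a segment's buddy is seg ^ 1, and as long as that bit is set the two halves collapse into one block at the next order. A small self-contained model of the same loop (the real code also maintains per-order free counters):

#include <stdio.h>
#include <stdbool.h>

#define MAX_ORDER 4

/* bitmap[order][seg] == true means "segment seg of size (1 << order) is free". */
static bool bitmap[MAX_ORDER + 1][1 << MAX_ORDER];

static void buddy_free(unsigned int seg, unsigned int order)
{
        seg >>= order;
        while (order < MAX_ORDER && bitmap[order][seg ^ 1]) {
                bitmap[order][seg ^ 1] = false; /* consume the free buddy */
                seg >>= 1;                      /* move up one level */
                order++;
        }
        bitmap[order][seg] = true;
        printf("final free block: order %u, segment %u\n", order, seg);
}

int main(void)
{
        bitmap[0][3] = true;    /* segment 3 (order 0) is already free */
        buddy_free(2, 0);       /* freeing its buddy merges to order 1 */
        return 0;
}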
|
/linux-6.12.1/arch/riscv/mm/ |
D | hugetlbpage.c | 35 unsigned long order; in huge_pte_alloc() local 68 for_each_napot_order(order) { in huge_pte_alloc() 69 if (napot_cont_size(order) == sz) { in huge_pte_alloc() 70 pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order)); in huge_pte_alloc() 88 unsigned long order; in huge_pte_offset() local 119 for_each_napot_order(order) { in huge_pte_offset() 120 if (napot_cont_size(order) == sz) { in huge_pte_offset() 121 pte = pte_offset_huge(pmd, addr & napot_cont_mask(order)); in huge_pte_offset() 186 unsigned long order; in arch_make_huge_pte() local 188 for_each_napot_order(order) { in arch_make_huge_pte() [all …]
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
D | mlx5hws_buddy.c | 85 u32 *order) in hws_buddy_find_free_seg() argument 109 *order = order_iter; in hws_buddy_find_free_seg() 113 int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order) in mlx5hws_buddy_alloc_mem() argument 117 err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter); in mlx5hws_buddy_alloc_mem() 124 while (order_iter > order) { in mlx5hws_buddy_alloc_mem() 131 seg <<= order; in mlx5hws_buddy_alloc_mem() 136 void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order) in mlx5hws_buddy_free_mem() argument 138 seg >>= order; in mlx5hws_buddy_free_mem() 140 while (test_bit(seg ^ 1, buddy->bitmap[order])) { in mlx5hws_buddy_free_mem() 141 bitmap_clear(buddy->bitmap[order], seg ^ 1, 1); in mlx5hws_buddy_free_mem() [all …]
|
/linux-6.12.1/kernel/bpf/ |
D | cgroup_iter.c | 54 int order; member 77 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start() 79 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start() 110 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_next() 112 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_next() 114 else if (p->order == BPF_CGROUP_ITER_ANCESTORS_UP) in cgroup_iter_seq_next() 176 p->order = aux->cgroup.order; in BTF_ID_LIST_GLOBAL_SINGLE() 200 int order = linfo->cgroup.order; in bpf_iter_attach_cgroup() local 203 if (order != BPF_CGROUP_ITER_DESCENDANTS_PRE && in bpf_iter_attach_cgroup() 204 order != BPF_CGROUP_ITER_DESCENDANTS_POST && in bpf_iter_attach_cgroup() [all …]
|
/linux-6.12.1/drivers/gpu/drm/i915/selftests/ |
D | i915_syncmap.c | 274 unsigned int pass, order; in igt_syncmap_join_above() local 296 for (order = 0; order < 64; order += SHIFT) { in igt_syncmap_join_above() 297 u64 context = BIT_ULL(order); in igt_syncmap_join_above() 335 unsigned int step, order, idx; in igt_syncmap_join_below() local 345 for (order = 64 - SHIFT; order > 0; order -= SHIFT) { in igt_syncmap_join_below() 346 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below() 354 context, order, step, sync->height, sync->prefix); in igt_syncmap_join_below() 362 for (order = SHIFT; order < 64; order += SHIFT) { in igt_syncmap_join_below() 363 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below() 367 context, order, step); in igt_syncmap_join_below() [all …]
|
D | i915_random.c | 70 void i915_random_reorder(unsigned int *order, unsigned int count, in i915_random_reorder() argument 73 i915_prandom_shuffle(order, sizeof(*order), count, state); in i915_random_reorder() 78 unsigned int *order, i; in i915_random_order() local 80 order = kmalloc_array(count, sizeof(*order), in i915_random_order() 82 if (!order) in i915_random_order() 83 return order; in i915_random_order() 86 order[i] = i; in i915_random_order() 88 i915_random_reorder(order, count, state); in i915_random_order() 89 return order; in i915_random_order()
|
/linux-6.12.1/drivers/gpu/drm/ |
D | drm_buddy.c | 16 unsigned int order, in drm_block_alloc() argument 21 BUG_ON(order > DRM_BUDDY_MAX_ORDER); in drm_block_alloc() 28 block->header |= order; in drm_block_alloc() 125 unsigned int order; in __drm_buddy_free() local 158 order = drm_buddy_block_order(block); in __drm_buddy_free() 161 return order; in __drm_buddy_free() 169 unsigned int order; in __force_merge() local 213 order = __drm_buddy_free(mm, block, true); in __force_merge() 214 if (order >= min_order) in __force_merge() 284 unsigned int order; in drm_buddy_init() local [all …]
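drm_block_alloc() packs the block's order into the low bits of block->header (block->header |= order, guarded by BUG_ON(order > DRM_BUDDY_MAX_ORDER)) alongside allocation-state bits, since the offset stored in the same word is aligned well beyond those bits. A toy model of such a header layout; the masks and bit positions here are hypothetical, not drm_buddy's actual ones:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_ORDER_MASK   0x3fULL        /* low 6 bits: order */
#define HDR_STATE_MASK   0xc0ULL        /* next 2 bits: free/allocated/split */
#define HDR_OFFSET_MASK  (~0xffULL)     /* remaining bits: block offset */

int main(void)
{
        uint64_t offset = 0x100000;     /* hypothetical, suitably aligned */
        unsigned int order = 9;

        uint64_t header = (offset & HDR_OFFSET_MASK) | (1ULL << 6) | order;

        assert((header & HDR_ORDER_MASK) == order);
        assert((header & HDR_OFFSET_MASK) == offset);
        printf("header 0x%llx: offset 0x%llx, order %llu\n",
               (unsigned long long)header,
               (unsigned long long)(header & HDR_OFFSET_MASK),
               (unsigned long long)(header & HDR_ORDER_MASK));
        return 0;
}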
|