
Searched refs:PUD_SIZE (Results 1 – 25 of 70) sorted by relevance

/linux-6.12.1/arch/arm64/mm/
hugetlbpage.c
56 case PUD_SIZE: in __hugetlb_valid_size()
109 case PUD_SIZE: in num_contig_ptes()
273 if (sz == PUD_SIZE) { in huge_pte_alloc()
314 if (sz != PUD_SIZE && pud_none(pud)) in huge_pte_offset()
344 case PUD_SIZE: in hugetlb_mask_last_page()
345 return PGDIR_SIZE - PUD_SIZE; in hugetlb_mask_last_page()
348 return PUD_SIZE - CONT_PMD_SIZE; in hugetlb_mask_last_page()
350 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
369 } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { in arch_make_huge_pte()
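Note: the hugetlb_mask_last_page() hits above all follow one pattern: the mask returned for a given huge page size is one step at the next-higher table level minus one step at this level. A minimal userspace sketch of that arithmetic, assuming the arm64 4K-page, 4-level layout (PMD_SHIFT 21, CONT_PMD_SHIFT 25 for 16 contiguous PMDs, PUD_SHIFT 30, PGDIR_SHIFT 39):

#include <stdio.h>

#define PMD_SHIFT       21
#define CONT_PMD_SHIFT  25
#define PUD_SHIFT       30
#define PGDIR_SHIFT     39

#define PMD_SIZE       (1UL << PMD_SHIFT)       /*   2 MiB */
#define CONT_PMD_SIZE  (1UL << CONT_PMD_SHIFT)  /*  32 MiB */
#define PUD_SIZE       (1UL << PUD_SHIFT)       /*   1 GiB */
#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)     /* 512 GiB */

int main(void)
{
    /* "One step at the level above, minus one step at this level",
     * as in source lines 345-350 above. Shift values are assumptions. */
    printf("PUD:      PGDIR_SIZE - PUD_SIZE    = %#lx\n", PGDIR_SIZE - PUD_SIZE);
    printf("CONT_PMD: PUD_SIZE - CONT_PMD_SIZE = %#lx\n", PUD_SIZE - CONT_PMD_SIZE);
    printf("PMD:      PUD_SIZE - PMD_SIZE      = %#lx\n", PUD_SIZE - PMD_SIZE);
    return 0;
}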
/linux-6.12.1/arch/riscv/mm/
kasan_init.c
97 if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && in kasan_populate_pud()
98 (next - vaddr) >= PUD_SIZE) { in kasan_populate_pud()
99 phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE); in kasan_populate_pud()
102 memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE); in kasan_populate_pud()
181 if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) { in kasan_early_clear_pud()
252 if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && in kasan_early_populate_pud()
253 (next - vaddr) >= PUD_SIZE) { in kasan_early_populate_pud()
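Note: every kasan_populate_pud()/kasan_early_populate_pud() hit above gates a PUD-sized shadow mapping on the same two conditions. A self-contained sketch of that check, with IS_ALIGNED reimplemented for illustration and a 1 GiB PUD (PUD_SHIFT 30) assumed:

#include <stdbool.h>

#define PUD_SHIFT 30
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static bool can_map_pud_block(unsigned long vaddr, unsigned long next)
{
    /* vaddr sits on a PUD boundary and the range [vaddr, next) covers
     * at least one full PUD, so a single block entry suffices. */
    return IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE;
}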
hugetlbpage.c
51 if (sz == PUD_SIZE) { in huge_pte_alloc()
104 if (sz == PUD_SIZE) in huge_pte_offset()
134 case PUD_SIZE: in hugetlb_mask_last_page()
135 return P4D_SIZE - PUD_SIZE; in hugetlb_mask_last_page()
138 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
236 else if (sz >= PUD_SIZE) in set_huge_pte_at()
406 else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE) in __hugetlb_valid_size()
init.c
590 if (sz == PUD_SIZE) { in create_pud_mapping()
702 !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) in best_map_size()
703 return PUD_SIZE; in best_map_size()
823 PUD_SIZE, PAGE_TABLE); in set_satp_mode()
1086 nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE; in setup_vm()
1158 BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); in setup_vm()
1177 (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE); in setup_vm()
1188 (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE); in setup_vm()
1697 free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap); in remove_pud_mapping()
tlbflush.c
155 else if (stride_size >= PUD_SIZE) in flush_tlb_range()
156 stride_size = PUD_SIZE; in flush_tlb_range()
/linux-6.12.1/include/asm-generic/
pgtable-nopud.h
20 #define PUD_SIZE (1UL << PUD_SHIFT) macro
21 #define PUD_MASK (~(PUD_SIZE-1))
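Note: these two definitions travel together throughout the results: PUD_SIZE is the byte span of one PUD entry, and PUD_MASK rounds an address down to that span — the same (addr + PUD_SIZE) & PUD_MASK idiom visible in the set_memory.c hits further down. A runnable sketch, assuming PUD_SHIFT of 30:

#include <stdio.h>

#define PUD_SHIFT 30
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

int main(void)
{
    unsigned long addr = 0x40123456UL;
    printf("start of this PUD: %#lx\n", addr & PUD_MASK);              /* 0x40000000 */
    printf("start of next PUD: %#lx\n", (addr + PUD_SIZE) & PUD_MASK); /* 0x80000000 */
    printf("offset within PUD: %#lx\n", addr & (PUD_SIZE - 1));        /* 0x123456 */
    return 0;
}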
/linux-6.12.1/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h
36 #define PUD_SIZE (1UL << PUD_SHIFT) macro
37 #define PUD_MASK (~(PUD_SIZE-1))
/linux-6.12.1/arch/x86/include/asm/
pgtable_64_types.h
101 #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) macro
102 #define PUD_MASK (~(PUD_SIZE - 1))
/linux-6.12.1/arch/s390/boot/
vmem.c
135 IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in kasan_pud_populate_zero_shadow()
276 IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) && in can_large_pud()
277 IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE); in can_large_pud()
/linux-6.12.1/arch/x86/mm/
kasan_init_64.c
83 ((end - addr) == PUD_SIZE) && in kasan_populate_pud()
84 IS_ALIGNED(addr, PUD_SIZE)) { in kasan_populate_pud()
85 p = early_alloc(PUD_SIZE, nid, false); in kasan_populate_pud()
88 memblock_free(p, PUD_SIZE); in kasan_populate_pud()
init.c
365 unsigned long start = round_down(mr[i].start, PUD_SIZE); in adjust_range_page_size_mask()
366 unsigned long end = round_up(mr[i].end, PUD_SIZE); in adjust_range_page_size_mask()
437 end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
450 start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
451 end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
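Note: the split_mem_range() hits round page frame numbers rather than byte addresses, so PFN_DOWN(PUD_SIZE) first converts the PUD span into frames. A sketch assuming 4K pages and 1 GiB PUDs, with the usual power-of-two round_up/round_down forms:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PUD_SHIFT  30
#define PUD_SIZE   (1UL << PUD_SHIFT)
#define PFN_DOWN(x)      ((x) >> PAGE_SHIFT)
#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long pfn  = 0x12345;             /* some page frame number */
    unsigned long step = PFN_DOWN(PUD_SIZE);  /* frames per PUD: 262144 */
    printf("frames per PUD:       %lu\n", step);
    printf("next PUD-aligned pfn: %#lx\n", round_up(pfn, step));   /* 0x40000 */
    printf("prev PUD-aligned pfn: %#lx\n", round_down(pfn, step)); /* 0x0 */
    return 0;
}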
/linux-6.12.1/mm/kasan/
init.c
160 if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in zero_pud_populate()
420 if (IS_ALIGNED(addr, PUD_SIZE) && in kasan_remove_pud_table()
421 IS_ALIGNED(next, PUD_SIZE)) { in kasan_remove_pud_table()
/linux-6.12.1/arch/arc/include/asm/
pgtable-levels.h
75 #define PUD_SIZE BIT(PUD_SHIFT) macro
76 #define PUD_MASK (~(PUD_SIZE - 1))
/linux-6.12.1/arch/s390/mm/
hugetlbpage.c
133 size = PUD_SIZE; in clear_huge_pte_skeys()
204 if (sz == PUD_SIZE) in huge_pte_alloc()
240 else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) in arch_hugetlb_valid_size()
vmem.c
336 if (IS_ALIGNED(addr, PUD_SIZE) && in modify_pud_table()
337 IS_ALIGNED(next, PUD_SIZE)) { in modify_pud_table()
344 if (IS_ALIGNED(addr, PUD_SIZE) && in modify_pud_table()
345 IS_ALIGNED(next, PUD_SIZE) && in modify_pud_table()
/linux-6.12.1/drivers/dax/
device.c
188 unsigned int fault_size = PUD_SIZE; in __dev_dax_pud_fault()
194 if (dev_dax->align > PUD_SIZE) { in __dev_dax_pud_fault()
207 (pud_addr + PUD_SIZE) > vmf->vma->vm_end) in __dev_dax_pud_fault()
211 phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE); in __dev_dax_pud_fault()
dax-private.h
104 if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) in dax_align_valid()
/linux-6.12.1/arch/x86/mm/pat/
set_memory.c
1303 if (start & (PUD_SIZE - 1)) { in unmap_pud_range()
1304 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; in unmap_pud_range()
1316 while (end - start >= PUD_SIZE) { in unmap_pud_range()
1321 unmap_pmd_range(pud, start, start + PUD_SIZE); in unmap_pud_range()
1323 start += PUD_SIZE; in unmap_pud_range()
1466 if (start & (PUD_SIZE - 1)) { in populate_pud()
1468 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; in populate_pud()
1501 while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { in populate_pud()
1505 start += PUD_SIZE; in populate_pud()
1506 cpa->pfn += PUD_SIZE >> PAGE_SHIFT; in populate_pud()
[all …]
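Note: the populate_pud() loop above consumes the range in whole-PUD steps while the CPU advertises gigabyte pages (X86_FEATURE_GBPAGES), advancing the pfn cursor by PUD_SIZE >> PAGE_SHIFT per step. A skeleton of that loop shape; set_gbpage() is a hypothetical placeholder, not a kernel function:

#define PAGE_SHIFT 12
#define PUD_SHIFT  30
#define PUD_SIZE   (1UL << PUD_SHIFT)

static void populate_gbpages_sketch(unsigned long start, unsigned long end,
                                    unsigned long *pfn, int has_gbpages)
{
    while (has_gbpages && end - start >= PUD_SIZE) {
        /* set_gbpage(start, *pfn); -- hypothetical: install one 1 GiB entry */
        start += PUD_SIZE;
        *pfn  += PUD_SIZE >> PAGE_SHIFT; /* one PUD worth of page frames */
    }
    /* anything left over is handed down to the PMD level, as in the kernel */
}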
/linux-6.12.1/arch/mips/include/asm/
pgtable-64.h
60 #define PUD_SIZE (1UL << PUD_SHIFT) macro
61 #define PUD_MASK (~(PUD_SIZE-1))
/linux-6.12.1/mm/
page_table_check.c
191 page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT); in __page_table_check_pud_clear()
259 page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT, in __page_table_check_pud_set()
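Note: both page_table_check hits convert the PUD span into a base-page count, since accounting there is per page frame. Assuming 4K pages and 1 GiB PUDs:

#define PAGE_SHIFT 12
#define PUD_SHIFT  30
#define PUD_SIZE   (1UL << PUD_SHIFT)

/* Number of base pages walked when a PUD-level entry is set or cleared:
 * 1 GiB / 4 KiB = 262144 frames (shift values assumed). */
static const unsigned long pages_per_pud = PUD_SIZE >> PAGE_SHIFT;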
/linux-6.12.1/arch/arm64/kvm/
mmu.c
95 n += DIV_ROUND_UP(range, PUD_SIZE); in kvm_mmu_split_nr_page_tables()
1375 if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) && in get_vma_page_shift()
1376 ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start && in get_vma_page_shift()
1377 ALIGN(hva, PUD_SIZE) <= vma->vm_end) in get_vma_page_shift()
1494 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) in user_mem_abort()
1520 max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE; in user_mem_abort()
1535 if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE) in user_mem_abort()
1549 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) { in user_mem_abort()
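Note: the get_vma_page_shift() hits test whether host VA and PA are congruent modulo PUD_SIZE and whether the PUD-aligned window around hva fits inside the VMA; only then can a single stage-2 PUD block map the range. A sketch with ALIGN/ALIGN_DOWN reimplemented for illustration:

#include <stdbool.h>

#define PUD_SHIFT 30
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

static bool pud_block_possible(unsigned long hva, unsigned long pa,
                               unsigned long vm_start, unsigned long vm_end)
{
    /* Same offset within a PUD, and the aligned block stays in the VMA. */
    return (hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
           ALIGN_DOWN(hva, PUD_SIZE) >= vm_start &&
           ALIGN(hva, PUD_SIZE) <= vm_end;
}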
/linux-6.12.1/arch/powerpc/mm/book3s64/
radix_pgtable.c
92 if (map_page_size == PUD_SIZE) { in early_map_kernel_page()
158 if (map_page_size == PUD_SIZE) { in __map_kernel_page()
325 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE && in create_physical_mapping()
327 mapping_size = PUD_SIZE; in create_physical_mapping()
887 if (!IS_ALIGNED(addr, PUD_SIZE) || in remove_pud_table()
888 !IS_ALIGNED(next, PUD_SIZE)) { in remove_pud_table()
1635 flush_tlb_kernel_range(addr, addr + PUD_SIZE); in pud_free_pmd_page()
radix_hugetlbpage.c
38 if (end - start >= PUD_SIZE) in radix__flush_hugetlb_tlb_range()
/linux-6.12.1/arch/sparc/mm/
hugetlbpage.c
291 if (sz >= PUD_SIZE) in huge_pte_alloc()
338 if (size >= PUD_SIZE) in __set_huge_pte_at()
381 if (size >= PUD_SIZE) in huge_ptep_get_and_clear()
/linux-6.12.1/arch/riscv/kvm/
mmu.c
562 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_age_gfn()
580 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_test_age_gfn()
629 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) in kvm_riscv_gstage_map()
643 if (vma_pagesize != PUD_SIZE && in kvm_riscv_gstage_map()
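Note: the WARN_ON hits pin g-stage mappings to exactly three architected sizes. A sketch of that invariant, with 4K/2M/1G shifts assumed:

#include <stdbool.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PUD_SIZE  (1UL << PUD_SHIFT)

static bool valid_gstage_size(unsigned long size)
{
    /* Only base pages, 2 MiB PMDs, and 1 GiB PUDs are mappable. */
    return size == PAGE_SIZE || size == PMD_SIZE || size == PUD_SIZE;
}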
