/linux-6.12.1/arch/arm64/include/asm/

memory.h
    43:  #define VA_BITS (CONFIG_ARM64_VA_BITS)    [macro definition]
    45:  #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
    56:  #if VA_BITS > 48
    63:  #define VA_BITS_MIN (VA_BITS)
    231: #if VA_BITS > 48
    235: #define vabits_actual ((u64)VA_BITS)
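These arm64 definitions compose: the start of the linear map, PAGE_OFFSET, is derived directly from VA_BITS. A minimal sketch of that arithmetic, assuming the upstream _PAGE_OFFSET(va) helper expands to -(UL(1) << (va)) (the expansion is an assumption here, not shown in the hit list):

    #include <stdio.h>

    /* Assumed expansion of the kernel's _PAGE_OFFSET() helper. */
    #define _PAGE_OFFSET(va) (-(1UL << (va)))

    int main(void)
    {
            unsigned long va_bits = 48;  /* e.g. CONFIG_ARM64_VA_BITS=48 */

            /* Lowest kernel VA: start of the linear map. */
            printf("PAGE_OFFSET = 0x%016lx\n", _PAGE_OFFSET(va_bits));
            /* -> PAGE_OFFSET = 0xffff000000000000 */
            return 0;
    }
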
processor.h
    55:  #define TASK_SIZE_MAX (UL(1) << VA_BITS)
pgtable-hwdef.h
    79:  #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
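The pgtable-hwdef.h formula sizes the top-level table from the VA bits left above the PGD level. A worked example, assuming a 48-bit VA, 4K-page, four-level configuration where PGDIR_SHIFT is 39:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed configuration: arm64, 4K pages, four levels. */
            int va_bits = 48, pgdir_shift = 39;

            /* One PGD entry per 2^PGDIR_SHIFT-byte region of the space. */
            printf("PTRS_PER_PGD = %d\n", 1 << (va_bits - pgdir_shift));
            /* -> PTRS_PER_PGD = 512 */
            return 0;
    }
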
pgtable.h
    24:   #if VA_BITS == VA_BITS_MIN
    886:  return vabits_actual == VA_BITS;    [in pgtable_l4_enabled()]
    1010: return vabits_actual == VA_BITS;    [in pgtable_l5_enabled()]
/linux-6.12.1/arch/sparc/include/asm/

processor_64.h
    23:  #define VA_BITS 44    [macro definition]
    25:  #define VPTE_SIZE (1UL << (VA_BITS - PAGE_SHIFT + 3))
    27:  #define VPTE_SIZE (1 << (VA_BITS - PAGE_SHIFT + 3))
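The sparc64 VPTE_SIZE arithmetic allocates one page-table entry per page of the 44-bit space. A sketch, assuming PAGE_SHIFT = 13 (8K pages) and 8-byte entries, which is what the "+ 3" encodes:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed sparc64 values: 8K pages, 8-byte PTEs. */
            int va_bits = 44, page_shift = 13;

            /* 2^(44-13) pages, each needing a 2^3-byte entry. */
            unsigned long vpte_size = 1UL << (va_bits - page_shift + 3);
            printf("VPTE_SIZE = %lu GiB\n", vpte_size >> 30);  /* 16 */
            return 0;
    }
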
/linux-6.12.1/arch/powerpc/include/asm/book3s/64/

mmu-hash.h
    578: #define VA_BITS 68    [macro definition]
    580: #define ESID_BITS (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
    581: #define ESID_BITS_1T (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
    651: #define VSID_BITS_256M (VA_BITS - SID_SHIFT)
    659: #define VSID_BITS_1T (VA_BITS - SID_SHIFT_1T)
    781: unsigned long va_bits = VA_BITS;    [in get_vsid()]
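The hash-MMU bit budget above can be checked numerically. A sketch, assuming SID_SHIFT = 28 (256M segments), SID_SHIFT_1T = 40 (1T segments), and CONTEXT_BITS = 19, values that accompany these definitions in mmu-hash.h but are not shown in the hit list:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed companion constants from mmu-hash.h. */
            int va_bits = 68, sid_shift = 28, sid_shift_1t = 40;
            int context_bits = 19;

            printf("ESID_BITS      = %d\n",
                   va_bits - (sid_shift + context_bits));     /* 21 */
            printf("ESID_BITS_1T   = %d\n",
                   va_bits - (sid_shift_1t + context_bits));  /* 9 */
            printf("VSID_BITS_256M = %d\n", va_bits - sid_shift);    /* 40 */
            printf("VSID_BITS_1T   = %d\n", va_bits - sid_shift_1t); /* 28 */
            return 0;
    }
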
/linux-6.12.1/arch/loongarch/include/asm/

processor.h
    33:  #define TASK_SIZE64 (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
kasan.h
    19:  #define XRANGE_SHADOW_SHIFT min(cpu_vabits, VA_BITS)
pgtable.h
    45:  #define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))    [macro definition]
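Unlike the fixed arm64/sparc/powerpc values, loongarch derives VA_BITS from the page-table geometry: each table of 8-byte entries resolves PAGE_SHIFT - 3 index bits. A worked example under an assumed 16K-page, three-level configuration:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed configuration: 16K pages, three levels. */
            int page_shift = 14;
            int bits_per_level = page_shift - 3;  /* 11 index bits/table */
            int levels = 3;

            /* PGDIR_SHIFT covers the page offset plus the lower levels;
             * the PGD itself resolves one more set of index bits. */
            int pgdir_shift = page_shift + (levels - 1) * bits_per_level;
            printf("VA_BITS = %d\n", pgdir_shift + bits_per_level);
            /* -> VA_BITS = 47 */
            return 0;
    }
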
/linux-6.12.1/scripts/gdb/linux/

mm.py
    48:  self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
    49:  if self.VA_BITS > 48:
    58:  self.VA_BITS_MIN = self.VA_BITS
    59:  self.vabits_actual = self.VA_BITS
    93:  self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
/linux-6.12.1/arch/riscv/include/asm/

pgtable.h
    74:  #define VA_BITS (pgtable_l5_enabled ? \    [macro definition]
    77:  #define VA_BITS VA_BITS_SV32    [macro definition]
    81:  (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
    126: #define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
    897: #define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
kasan.h
    28:  #define KASAN_SHADOW_SIZE (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
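On riscv, VA_BITS is a runtime choice among the Sv paging modes, and the KASAN shadow is sized from it. A sketch, assuming the Sv39/Sv48/Sv57 widths from the privileged spec and KASAN_SHADOW_SCALE_SHIFT = 3 (one shadow byte per eight bytes of memory):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            /* Assumed boot-time flags: e.g. running in Sv39. */
            bool pgtable_l4_enabled = false, pgtable_l5_enabled = false;
            int va_bits = pgtable_l5_enabled ? 57
                        : pgtable_l4_enabled ? 48 : 39;

            /* Shadow spans the kernel half of the VA space, scaled
             * down by 2^3 (assumed KASAN_SHADOW_SCALE_SHIFT = 3). */
            unsigned long shadow = 1UL << ((va_bits - 1) - 3);

            printf("VA_BITS=%d shadow=%lu GiB\n", va_bits, shadow >> 30);
            /* -> VA_BITS=39 shadow=32 GiB */
            return 0;
    }
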
/linux-6.12.1/arch/riscv/kernel/

vmcore_info.c
    13:  VMCOREINFO_NUMBER(VA_BITS);    [in arch_crash_save_vmcoreinfo()]
/linux-6.12.1/arch/arm64/kernel/

vmcore_info.c
    22:  VMCOREINFO_NUMBER(VA_BITS);    [in arch_crash_save_vmcoreinfo()]
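Both vmcore_info.c hits export VA_BITS into the vmcoreinfo note that dump tools such as makedumpfile parse. A sketch of the string this produces, assuming VMCOREINFO_NUMBER() formats its argument as NUMBER(name)=value (the format is an assumption here, not quoted from the hits):

    #include <stdio.h>

    /* Assumed shape of the kernel's VMCOREINFO_NUMBER() macro. */
    #define VMCOREINFO_NUMBER(name) \
            printf("NUMBER(%s)=%ld\n", #name, (long)(name))

    int main(void)
    {
            long VA_BITS = 48;  /* hypothetical value for this build */

            VMCOREINFO_NUMBER(VA_BITS);  /* prints NUMBER(VA_BITS)=48 */
            return 0;
    }
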
/linux-6.12.1/arch/arm64/mm/

kasan_init.c
    203: BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), SHADOW_ALIGN));    [in kasan_early_init()]
    246: u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS    [in root_level_idx()]
proc.S
    476: mov x9, #64 - VA_BITS
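The proc.S operand is the TCR_ELx.TxSZ encoding: the architecture wants the count of unused top address bits rather than the VA width itself (this reading of the assembly is an assumption from the surrounding arm64 context). A one-line check:

    #include <stdio.h>

    int main(void)
    {
            int va_bits = 48;  /* assumed kernel VA width */

            /* TxSZ = 64 - VA_BITS: top bits excluded from translation. */
            printf("TxSZ = %d\n", 64 - va_bits);  /* 16 */
            return 0;
    }
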
/linux-6.12.1/Documentation/arch/arm64/

memory.rst
    127: Most code in the kernel should not need to consider the VA_BITS, for
    131: VA_BITS        constant        the *maximum* VA space size
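The memory.rst distinction, VA_BITS as the compile-time maximum versus the VA size actually in use, is exactly what the pgtable.h hits above test. A hedged sketch of that pattern (the 52/48 split is an illustrative assumption):

    #include <stdio.h>

    #define VA_BITS 52  /* assumed compile-time maximum */

    static unsigned long vabits_actual = 48;  /* assumed boot-time value */

    /* Mirrors the vabits_actual == VA_BITS tests in pgtable.h above. */
    static int full_va_enabled(void)
    {
            return vabits_actual == VA_BITS;
    }

    int main(void)
    {
            printf("using %lu of up to %d VA bits (%s VA space)\n",
                   vabits_actual, VA_BITS,
                   full_va_enabled() ? "full" : "reduced");
            return 0;
    }
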
/linux-6.12.1/arch/arm64/kernel/pi/

map_kernel.c
    210: int va_bits = VA_BITS;    [in early_map_kernel()]
/linux-6.12.1/arch/csky/

Kconfig
    189: # VA_BITS - PAGE_SHIFT - 3
/linux-6.12.1/Documentation/admin-guide/kdump/

vmcoreinfo.rst
    419: VA_BITS    [section heading, ARM64]
    566: VA_BITS    [section heading, RISCV64]
/linux-6.12.1/arch/powerpc/platforms/pseries/

lpar.c
    1933: unsigned long va_bits = VA_BITS;    [in vsid_unscramble()]
/linux-6.12.1/arch/arm64/kvm/

mmu.c
    623: if ((base ^ io_map_base) & BIT(VA_BITS - 1))    [in __hyp_alloc_private_va_range()]
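The mmu.c hit XORs against bit VA_BITS - 1 to detect whether two addresses fall in different halves of the VA space: the top VA bit differs iff they straddle the midpoint. A sketch of the same test, with a hypothetical same_va_half() helper:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* Two addresses share a half of an N-bit VA space iff their top
     * VA bit matches; XOR exposes any mismatch. */
    static int same_va_half(unsigned long a, unsigned long b, int va_bits)
    {
            return !((a ^ b) & BIT(va_bits - 1));
    }

    int main(void)
    {
            /* Bit 47 set vs. clear: different halves of a 48-bit space. */
            printf("%d\n", same_va_half(0x0000800000001000UL,
                                        0x0000000000002000UL, 48)); /* 0 */
            return 0;
    }
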
/linux-6.12.1/arch/riscv/

Kconfig
    255: # VA_BITS - PAGE_SHIFT - 3
/linux-6.12.1/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
    2430: pgtbl_cfg.ias = min_t(unsigned long, ias, VA_BITS);    [in arm_smmu_domain_finalise()]
/linux-6.12.1/arch/arm64/

Kconfig
    319: # VA_BITS - PAGE_SHIFT - 3
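The three identical "# VA_BITS - PAGE_SHIFT - 3" Kconfig comments (csky, riscv, arm64) describe the same ceiling on mmap address randomization: entropy applies to page-aligned addresses, with the top three VA bits held back (the mmap-randomization reading is an assumption from the Kconfig context, not shown in the hits). A worked example, assuming a 48-bit VA and 4K pages:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed configuration: 48 VA bits, 4K pages. */
            int va_bits = 48, page_shift = 12;

            /* Randomizable bits: page-aligned, top 3 bits reserved. */
            printf("max mmap rnd bits = %d\n",
                   va_bits - page_shift - 3);  /* 33 */
            return 0;
    }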