/linux-6.12.1/arch/arm/mm/ |
D | nommu.c |
      159  void *zero_page;  in paging_init() local
      165  zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);  in paging_init()
      166  if (!zero_page)  in paging_init()
      172  empty_zero_page = virt_to_page(zero_page);  in paging_init()
|
D | mmu.c |
      1768  void *zero_page;  in paging_init() local
      1796  zero_page = early_alloc(PAGE_SIZE);  in paging_init()
      1800  empty_zero_page = virt_to_page(zero_page);  in paging_init()
|
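Both ARM paging_init() variants above follow the same boot-time pattern: allocate one zeroed, page-aligned page and publish its struct page as empty_zero_page, which is what ZERO_PAGE(0) expands to on ARM. A minimal sketch of that pattern (the function name and error handling are illustrative, not the kernel's):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>

struct page *empty_zero_page;	/* what ZERO_PAGE(0) expands to on ARM */

static void __init setup_empty_zero_page(void)
{
	void *zero_page;

	/* One page-aligned page; memblock_alloc() returns zeroed memory. */
	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!zero_page)
		panic("%s: failed to allocate the zero page\n", __func__);

	/* Publish the struct page so ZERO_PAGE(0) can hand it out. */
	empty_zero_page = virt_to_page(zero_page);
}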
/linux-6.12.1/arch/arm64/kernel/ |
D | hibernate.c |
      402  void *zero_page;  in swsusp_arch_resume() local
      427  zero_page = (void *)get_safe_page(GFP_ATOMIC);  in swsusp_arch_resume()
      428  if (!zero_page) {  in swsusp_arch_resume()
      464  resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));  in swsusp_arch_resume()
|
D | machine_kexec.c |
      139  kimage->arch.zero_page = __pa_symbol(empty_zero_page);  in machine_kexec_post_load()
|
D | asm-offsets.c |
      194  DEFINE(KIMAGE_ARCH_ZERO_PAGE, offsetof(struct kimage, arch.zero_page));  in main()
|
/linux-6.12.1/fs/iomap/ |
D | direct-io.c |
      35   static struct page *zero_page;  variable
      263  __bio_add_page(bio, zero_page, len, 0);  in iomap_dio_zero()
      777  zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,  in iomap_dio_init()
      780  if (!zero_page)  in iomap_dio_init()
|
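direct-io.c keeps one statically allocated zero-filled buffer (the allocation order is truncated in the listing) and splices it into bios whenever a range has to be zeroed on disk. A rough sketch of the same idea, assuming a single-page buffer and a caller that already owns a bio; the helper names are made up:

#include <linux/bio.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mm.h>

static struct page *zero_page;

static int __init dio_zero_page_init(void)
{
	/* One dedicated page of zeros, allocated once at init time. */
	zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (!zero_page)
		return -ENOMEM;
	return 0;
}

/* Append 'len' bytes of zeros (len <= PAGE_SIZE here) to an existing bio. */
static void dio_add_zero_range(struct bio *bio, unsigned int len)
{
	__bio_add_page(bio, zero_page, len, 0);
}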
/linux-6.12.1/arch/arm64/include/asm/ |
D | kexec.h |
      116  phys_addr_t zero_page;  member
|
D | assembler.h |
      465  .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
      466  phys_to_ttbr \tmp, \zero_page
|
/linux-6.12.1/drivers/dma/ |
D | bcm2835-dma.c |
      50   dma_addr_t zero_page;  member
      750  if (buf_addr == od->zero_page && !c->is_lite_channel)  in bcm2835_dma_prep_dma_cyclic()
      852  dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,  in bcm2835_dma_free()
      935  od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,  in bcm2835_dma_probe()
      938  if (dma_mapping_error(od->ddev.dev, od->zero_page)) {  in bcm2835_dma_probe()
|
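bcm2835-dma.c maps the shared ZERO_PAGE(0) once at probe time, keeps the resulting dma_addr_t in its device structure (so the cyclic-prep path at line 750 can recognise a zero-page source), and unmaps it on free. A sketch of just the map/unmap pair, with the device pointer and handle storage assumed and the helper names made up:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int map_zero_page(struct device *dev, dma_addr_t *handle)
{
	/* The CPU never writes this page again, so cache maintenance
	 * can be skipped on map/unmap. */
	*handle = dma_map_page_attrs(dev, ZERO_PAGE(0), 0, PAGE_SIZE,
				     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

static void unmap_zero_page(struct device *dev, dma_addr_t handle)
{
	dma_unmap_page_attrs(dev, handle, PAGE_SIZE, DMA_TO_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}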
/linux-6.12.1/arch/arm64/kvm/ |
D | hypercalls.c |
      172  const void *zero_page = page_to_virt(ZERO_PAGE(0));  in kvm_smccc_set_filter() local
      180  if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))  in kvm_smccc_set_filter()
|
D | sys_regs.c |
      4631  const void *zero_page = page_to_virt(ZERO_PAGE(0));  in kvm_vm_ioctl_get_reg_writable_masks() local
      4636  memcmp(range->reserved, zero_page, sizeof(range->reserved)))  in kvm_vm_ioctl_get_reg_writable_masks()
|
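Both KVM call sites above use the same trick: rather than allocating a zeroed scratch buffer, they memcmp() a userspace-supplied struct's pad/reserved bytes against the kernel's zero page to reject non-zero padding. A sketch of that check against a hypothetical struct; the only requirement is that the compared field is no larger than one page:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical ioctl argument; only 'reserved' matters here and it must be
 * no larger than PAGE_SIZE for the zero-page comparison to be valid. */
struct my_uapi_arg {
	u64 flags;
	u8  reserved[48];
};

static int check_reserved_is_zero(const struct my_uapi_arg *arg)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));

	/* Any non-zero byte in 'reserved' makes memcmp() return non-zero. */
	if (memcmp(arg->reserved, zero_page, sizeof(arg->reserved)))
		return -EINVAL;
	return 0;
}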
/linux-6.12.1/include/target/ |
D | target_core_fabric.h |
      227  u32 length, bool zero_page, bool chainable);
|
/linux-6.12.1/drivers/nvdimm/ |
D | pfn_devs.c |
      368  void *zero_page = page_address(ZERO_PAGE(0));  in nd_pfn_clear_memmap_errors() local
      406  rc = nvdimm_write_bytes(ndns, nsoff, zero_page,  in nd_pfn_clear_memmap_errors()
|
D | btt.c |
      512  void *zero_page = page_address(ZERO_PAGE(0));  in arena_clear_freelist_error() local
      522  ret = arena_write_bytes(arena, nsoff, zero_page,  in arena_clear_freelist_error()
|
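pfn_devs.c and btt.c both use page_address(ZERO_PAGE(0)) as a ready-made source of zeros when overwriting parts of the namespace. A generic sketch of zeroing a range in page-sized chunks from that buffer, with a made-up write_bytes() callback standing in for nvdimm_write_bytes()/arena_write_bytes():

#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Stand-in for the nvdimm-internal byte-granularity write helpers. */
typedef int (*write_bytes_fn)(void *ctx, u64 offset, const void *buf, size_t len);

static int zero_range(void *ctx, write_bytes_fn write_bytes, u64 offset,
		      size_t len)
{
	void *zero_page = page_address(ZERO_PAGE(0));

	while (len) {
		/* Never read past the single page of zeros. */
		size_t chunk = min_t(size_t, len, PAGE_SIZE);
		int rc = write_bytes(ctx, offset, zero_page, chunk);

		if (rc)
			return rc;
		offset += chunk;
		len -= chunk;
	}
	return 0;
}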
/linux-6.12.1/arch/x86/virt/vmx/tdx/ |
D | tdx.c |
      677  const void *zero_page = (const void *)page_address(ZERO_PAGE(0));  in reset_tdx_pages() local
      682  movdir64b(__va(phys), zero_page);  in reset_tdx_pages()
|
/linux-6.12.1/drivers/target/ |
D | target_core_transport.c |
      2765  bool zero_page, bool chainable)  in target_alloc_sgl() argument
      2767  gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);  in target_alloc_sgl()
|
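Inside target_alloc_sgl() the zero_page flag from the target_core_fabric.h prototype simply becomes __GFP_ZERO, so the caller chooses whether the scatterlist pages come back pre-zeroed. A hedged example of a caller requesting a zeroed, non-chained SGL, assuming the usual five-argument signature:

#include <linux/scatterlist.h>
#include <linux/types.h>
#include <target/target_core_fabric.h>

static int alloc_zeroed_sgl(struct scatterlist **sgl, unsigned int *nents,
			    u32 length)
{
	/* zero_page = true  -> pages allocated with __GFP_ZERO (line 2767)
	 * chainable = false -> plain, non-chained scatterlist */
	return target_alloc_sgl(sgl, nents, length, true, false);
}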
/linux-6.12.1/virt/kvm/ |
D | kvm_main.c |
      3569  const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));  in kvm_clear_guest() local
      3576  ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);  in kvm_clear_guest()
|
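kvm_clear_guest() zeroes guest memory without a temporary buffer by repeatedly writing the host zero page through kvm_write_guest_page(), one in-page segment at a time (init_rmode_tss() in vmx.c below does much the same via __copy_to_user()). A simplified sketch of that loop; the kernel's version uses its own segmenting helper:

#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/mm.h>

static int clear_guest_range(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	/* Kernel-virtual address of the all-zero page. */
	const void *zero_page = __va(page_to_phys(ZERO_PAGE(0)));
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	while (len) {
		/* Stay within the current guest page (and the zero page). */
		int seg = min_t(unsigned long, len, PAGE_SIZE - offset);
		int ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);

		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}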
/linux-6.12.1/arch/x86/kvm/vmx/ |
D | vmx.c |
      3873  const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));  in init_rmode_tss() local
      3878  if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))  in init_rmode_tss()
|