/linux-6.12.1/crypto/async_tx/ |
D | raid6test.c |
     40  get_random_bytes(page_address(data[i]), PAGE_SIZE);  in makedata()
    131  memset(page_address(recovi), 0xf0, PAGE_SIZE);  in test_disks()
    132  memset(page_address(recovj), 0xba, PAGE_SIZE);  in test_disks()
    139  erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);  in test_disks()
    140  errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);  in test_disks()
    167  memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);  in test()
    168  memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);  in test()
|
D | async_raid6_recov.c |
     73  a = page_address(srcs[0]) + src_offs[0];  in async_sum_product()
     74  b = page_address(srcs[1]) + src_offs[1];  in async_sum_product()
     75  c = page_address(dest) + d_off;  in async_sum_product()
    143  d = page_address(dest) + d_off;  in async_mult()
    144  s = page_address(src) + s_off;  in async_mult()
    419  ptrs[i] = page_address(blocks[i]) + offs[i];  in async_raid6_2data_recov()
    502  ptrs[i] = page_address(blocks[i]) + offs[i];  in async_raid6_datap_recov()
|
D | async_pq.c |
    124  srcs[i] = page_address(blocks[i]) + offsets[i];  in do_sync_gen_syndrome()
    406  p = page_address(p_src) + p_off;  in async_syndrome_val()
    407  s = page_address(spare) + s_off;  in async_syndrome_val()
    419  q = page_address(q_src) + q_off;  in async_syndrome_val()
    420  s = page_address(spare) + s_off;  in async_syndrome_val()
|
/linux-6.12.1/include/linux/ |
D | highmem-internal.h |
     46  addr = page_address(page);  in kmap()
    165  return page_address(page);  in kmap()
    174  kunmap_flush_on_unmap(page_address(page));  in kunmap()
    180  return page_address(page);  in kmap_local_page()
    185  return page_address(&folio->page) + offset;  in kmap_local_folio()
    212  return page_address(page);  in kmap_atomic()
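Most of these matches are the !CONFIG_HIGHMEM stubs: with no high memory, every page sits in the kernel's linear map, so the kmap family can hand back page_address() with no mapping work. A minimal sketch of those stubs, simplified from the matches above (the real header also calls kunmap_flush_on_unmap() where the architecture requires it):

    /* With no high memory, "mapping" a page is just an address lookup. */
    static inline void *kmap(struct page *page)
    {
            might_sleep();                  /* kmap() may sleep by contract */
            return page_address(page);
    }

    static inline void *kmap_local_page(struct page *page)
    {
            return page_address(page);      /* nothing to map or unmap */
    }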
|
/linux-6.12.1/mm/kmsan/ |
D | shadow.c |
     28  return page_address(shadow_page_for(page));  in shadow_ptr_for()
     33  return page_address(origin_page_for(page));  in origin_ptr_for()
    158  kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,  in kmsan_copy_page_meta()
    184  __memset(page_address(shadow), 0, PAGE_SIZE * pages);  in kmsan_alloc_page()
    185  __memset(page_address(origin), 0, PAGE_SIZE * pages);  in kmsan_alloc_page()
    193  __memset(page_address(shadow), -1, PAGE_SIZE * pages);  in kmsan_alloc_page()
    202  ((depot_stack_handle_t *)page_address(origin))[i] = handle;  in kmsan_alloc_page()
    210  kmsan_internal_poison_memory(page_address(page),  in kmsan_free_page()
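As the first two matches show, KMSAN pairs each data page with a shadow page (initialized-ness bits) and an origin page (stack depot handles), both resolved to kernel virtual addresses through page_address(). A sketch of those two lookups, using the helper names exactly as they appear in the listing:

    /* Metadata pages are ordinary lowmem pages, so page_address()
     * yields a directly usable pointer to them. */
    static void *shadow_ptr_for(struct page *page)
    {
            return page_address(shadow_page_for(page));
    }

    static void *origin_ptr_for(struct page *page)
    {
            return page_address(origin_page_for(page));
    }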
|
/linux-6.12.1/arch/arm64/mm/ |
D | pageattr.c |
    111  __change_memory_common((u64)page_address(area->pages[i]),  in change_memory_common()
    176  (unsigned long)page_address(page),  in set_direct_map_invalid_noflush()
    191  (unsigned long)page_address(page),  in set_direct_map_default_noflush()
    201  set_memory_valid((unsigned long)page_address(page), numpages, enable);  in __kernel_map_pages()
    220  unsigned long addr = (unsigned long)page_address(page);  in kernel_page_present()
|
D | copypage.c |
     19  void *kto = page_address(to);  in copy_highpage()
     20  void *kfrom = page_address(from);  in copy_highpage()
|
D | mteswap.c |
     34  mte_save_page_tags(page_address(page), tag_storage);  in mte_save_tags()
     58  mte_restore_page_tags(page_address(page), tags);  in mte_restore_tags()
|
/linux-6.12.1/security/selinux/ |
D | status.c |
     52  status = page_address(selinux_state.status_page);  in selinux_kernel_status_page()
     85  status = page_address(selinux_state.status_page);  in selinux_status_update_setenforce()
    110  status = page_address(selinux_state.status_page);  in selinux_status_update_policyload()
|
/linux-6.12.1/arch/riscv/mm/ |
D | pageattr.c |
    122  ptep_new = (pte_t *)page_address(pte_page);  in __split_linear_mapping_pmd()
    162  pmdp_new = (pmd_t *)page_address(pmd_page);  in __split_linear_mapping_pud()
    215  pudp_new = (pud_t *)page_address(pud_page);  in __split_linear_mapping_p4d()
    295  lm_start = (unsigned long)page_address(area->pages[i]);  in __set_memory()
    379  return __set_memory((unsigned long)page_address(page), 1,  in set_direct_map_invalid_noflush()
    385  return __set_memory((unsigned long)page_address(page), 1,  in set_direct_map_default_noflush()
    411  unsigned long start = (unsigned long)page_address(page);  in __kernel_map_pages()
    422  unsigned long addr = (unsigned long)page_address(page);  in kernel_page_present()
|
/linux-6.12.1/arch/x86/kernel/ |
D | machine_kexec_32.c |
    105  control_page = page_address(image->control_code_page);  in machine_kexec_prepare_page_tables()
    139  set_memory_x((unsigned long)page_address(image->control_code_page), 1);  in machine_kexec_prepare()
    153  set_memory_nx((unsigned long)page_address(image->control_code_page), 1);  in machine_kexec_cleanup()
    197  control_page = page_address(image->control_code_page);  in machine_kexec()
|
D | espfix_64.c |
    167  pmd_p = (pmd_t *)page_address(page);  in init_espfix_ap()
    179  pte_p = (pte_t *)page_address(page);  in init_espfix_ap()
    187  stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));  in init_espfix_ap()
|
D | irq_32.c |
    127  per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);  in irq_init_percpu_irqstack()
    128  per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);  in irq_init_percpu_irqstack()
|
/linux-6.12.1/net/ceph/ |
D | cls_lock_client.c |
     56  p = page_address(lock_op_page);  in ceph_cls_lock()
    115  p = page_address(unlock_op_page);  in ceph_cls_unlock()
    168  p = page_address(break_op_page);  in ceph_cls_break_lock()
    217  p = page_address(cookie_op_page);  in ceph_cls_set_cookie()
    362  p = page_address(get_info_op_page);  in ceph_cls_lock_info()
    377  p = page_address(reply_page);  in ceph_cls_lock_info()
    415  p = page_address(pages[0]);  in ceph_cls_assert_locked()
|
D | pagevec.c |
     72  bad = copy_from_user(page_address(pages[i]) + po, data, l);  in ceph_copy_user_to_page_vector()
     98  memcpy(page_address(pages[i]) + po, data, l);  in ceph_copy_to_page_vector()
    121  memcpy(data, page_address(pages[i]) + po, l);  in ceph_copy_from_page_vector()
|
/linux-6.12.1/drivers/gpu/drm/v3d/ |
D | v3d_mmu.c |
     95  u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;  in v3d_mmu_insert_ptes() local
     96  u32 pte = page_prot | page_address;  in v3d_mmu_insert_ptes()
     99  BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=  in v3d_mmu_insert_ptes()
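Note that in this file page_address is a local u32 (the cross-reference tags it "local"), not the mm helper of the same name: it holds the DMA address shifted down to a V3D page number, which is OR-ed with the protection bits to form each PTE. A sketch of that packing, with names taken from the matches above:

    /* Shadowed identifier: this page_address is just a page-frame
     * number for the V3D MMU, unrelated to page_address(page). */
    u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
    u32 pte = page_prot | page_address;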
|
/linux-6.12.1/mm/ |
D | highmem.c |
    282  if (page_address(page))  in map_new_virtual()
    283  return (unsigned long)page_address(page);  in map_new_virtual()
    316  vaddr = (unsigned long)page_address(page);  in kmap_high()
    342  vaddr = (unsigned long)page_address(page);  in kmap_high_get()
    369  vaddr = (unsigned long)page_address(page);  in kunmap_high()
    585  return page_address(page);  in __kmap_local_page_prot()
    753  void *page_address(const struct page *page)  in page_address() function
    779  EXPORT_SYMBOL(page_address);
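This file holds the definition of page_address() itself (the "function" match at its line 753, exported at line 779). The contract implied by the map_new_virtual() matches is worth spelling out: a lowmem page always yields its fixed linear-map address, while an unmapped highmem page yields NULL and the caller must create a temporary mapping. A hedged sketch of that caller-side pattern; fill_page() is a hypothetical example, not from the listing:

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/highmem.h>

    static void fill_page(struct page *page, int val)
    {
            void *vaddr = page_address(page);

            if (vaddr) {
                    /* lowmem: permanently mapped, use it directly */
                    memset(vaddr, val, PAGE_SIZE);
            } else {
                    /* highmem: map temporarily, touch, then unmap */
                    vaddr = kmap_local_page(page);
                    memset(vaddr, val, PAGE_SIZE);
                    kunmap_local(vaddr);
            }
    }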
|
/linux-6.12.1/drivers/mtd/devices/ |
D | block2mtd.c |
     70  max = page_address(page) + PAGE_SIZE;  in _block2mtd_erase()
     71  for (p=page_address(page); p<max; p++)  in _block2mtd_erase()
     74  memset(page_address(page), 0xff, PAGE_SIZE);  in _block2mtd_erase()
    125  memcpy(buf, page_address(page) + offset, cpylen);  in block2mtd_read()
    159  if (memcmp(page_address(page)+offset, buf, cpylen)) {  in _block2mtd_write()
    161  memcpy(page_address(page) + offset, buf, cpylen);  in _block2mtd_write()
|
/linux-6.12.1/mm/kasan/ |
D | common.c |
    130  kasan_unpoison(set_tag(page_address(page), tag),  in __kasan_unpoison_pages()
    141  kasan_poison(page_address(page), PAGE_SIZE << order,  in __kasan_poison_pages()
    152  kasan_poison(page_address(page), page_size(page),  in __kasan_poison_slab()
    288  if (ptr != page_address(virt_to_head_page(ptr))) {  in check_page_allocation()
    482  ptr = page_address(page);  in __kasan_mempool_poison_pages()
|
/linux-6.12.1/arch/loongarch/mm/ |
D | pageattr.c |
    167  unsigned long addr = (unsigned long)page_address(page);  in kernel_page_present()
    202  unsigned long addr = (unsigned long)page_address(page);  in set_direct_map_default_noflush()
    212  unsigned long addr = (unsigned long)page_address(page);  in set_direct_map_invalid_noflush()
|
/linux-6.12.1/arch/arm/mm/ |
D | copypage-v6.c |
     80  discard_old_kernel_data(page_address(to));  in v6_copy_user_highpage_aliasing()
    109  discard_old_kernel_data(page_address(page));  in v6_clear_user_highpage_aliasing()
|
/linux-6.12.1/drivers/iommu/ |
D | iommu-pages.h |
    101  return page_address(page);  in iommu_alloc_pages_node()
    118  return page_address(page);  in iommu_alloc_pages()
|
/linux-6.12.1/arch/powerpc/mm/ |
D | dma-noncoherent.c |
    102  unsigned long start = (unsigned long)page_address(page) + offset;  in __dma_sync_page()
    121  unsigned long kaddr = (unsigned long)page_address(page);  in arch_dma_prep_coherent()
|
/linux-6.12.1/kernel/power/ |
D | snapshot.c |
     61  static inline int __must_check hibernate_restore_protect_page(void *page_address)  in hibernate_restore_protect_page() argument
     64  return set_memory_ro((unsigned long)page_address, 1);  in hibernate_restore_protect_page()
     68  static inline int hibernate_restore_unprotect_page(void *page_address)  in hibernate_restore_unprotect_page()
     71  return set_memory_rw((unsigned long)page_address, 1);  in hibernate_restore_unprotect_page()
     77  static inline int __must_check hibernate_restore_protect_page(void *page_address) {return 0; }  in hibernate_restore_protect_page() argument
     78  static inline int hibernate_restore_unprotect_page(void *page_address) {return 0; }  in hibernate_restore_unprotect_page() argument
    103  unsigned long addr = (unsigned long)page_address(page);  in hibernate_unmap_page()
    239  static void recycle_safe_page(void *page_address)  in recycle_safe_page() argument
    241  struct linked_page *lp = page_address;  in recycle_safe_page()
   1462  zeros_only = do_copy_page(dst, page_address(s_page));  in safe_copy_page()
   [all …]
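Careful reading here: several of these matches are not calls at all. snapshot.c reuses page_address as a plain void * parameter name (the cross-reference tags these "argument"), shadowing the mm helper inside those functions, e.g. (copied from the matches above):

    /* In this scope page_address is the argument, not the helper. */
    static inline int __must_check hibernate_restore_protect_page(void *page_address)
    {
            return set_memory_ro((unsigned long)page_address, 1);
    }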
|
/linux-6.12.1/arch/riscv/kernel/ |
D | machine_kexec.c |
     63  control_code_buffer = page_address(image->control_code_page);  in machine_kexec_prepare()
    180  void *control_code_buffer = page_address(image->control_code_page);  in machine_kexec()
|