Searched refs:page_size (Results 1 – 25 of 431) sorted by relevance

/linux-6.12.1/tools/testing/selftests/mm/
mseal_test.c
205 unsigned long page_size = getpagesize(); in seal_support() local
207 ptr = mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); in seal_support()
211 ret = sys_mseal(ptr, page_size); in seal_support()
233 unsigned long page_size = getpagesize(); in test_seal_addseal() local
234 unsigned long size = 4 * page_size; in test_seal_addseal()
249 unsigned long page_size = getpagesize(); in test_seal_unmapped_start() local
250 unsigned long size = 4 * page_size; in test_seal_unmapped_start()
256 ret = sys_munmap(ptr, 2 * page_size); in test_seal_unmapped_start()
267 ret = sys_mseal(ptr + 2 * page_size, 2 * page_size); in test_seal_unmapped_start()
277 unsigned long page_size = getpagesize(); in test_seal_unmapped_middle() local
[all …]
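
The mseal_test.c excerpts above map page-granular regions with getpagesize() and then seal them. A minimal standalone sketch of the same call, assuming a 6.10+ kernel and libc headers that define SYS_mseal (both assumptions, not shown by the snippets):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    unsigned long page_size = getpagesize();

    /* One anonymous page to seal. */
    void *ptr = mmap(NULL, page_size, PROT_READ,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED)
        return 1;

    /* mseal(addr, len, flags); flags must currently be 0.
     * SYS_mseal needs kernel/libc headers from 6.10 or later (assumption). */
    if (syscall(SYS_mseal, ptr, page_size, 0)) {
        perror("mseal");        /* e.g. ENOSYS on older kernels */
        return 1;
    }

    /* The range is now sealed: munmap(), mprotect() and mremap() on it
     * are expected to fail with EPERM. */
    if (munmap(ptr, page_size) == 0)
        printf("unexpected: munmap succeeded on a sealed range\n");

    return 0;
}
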
mremap_dontunmap.c
19 unsigned long page_size; variable
46 void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE, in kernel_support_for_mremap_dontunmap()
53 mremap(source_mapping, num_pages * page_size, num_pages * page_size, in kernel_support_for_mremap_dontunmap()
58 BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
62 BUG_ON(munmap(source_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
71 BUG_ON(size & (page_size - 1), in check_region_contains_byte()
73 BUG_ON((unsigned long)addr & (page_size - 1), in check_region_contains_byte()
76 memset(page_buffer, byte, page_size); in check_region_contains_byte()
78 unsigned long num_pages = size / page_size; in check_region_contains_byte()
84 memcmp(addr + (i * page_size), page_buffer, page_size); in check_region_contains_byte()
[all …]
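
mremap_dontunmap.c probes mremap() with MREMAP_DONTUNMAP, which moves the page contents to a new mapping while leaving the old VMA in place. A hedged sketch of that flag on its own, assuming Linux 5.7+ and glibc headers that already define MREMAP_DONTUNMAP (otherwise <linux/mman.h> would be needed):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    unsigned long page_size = getpagesize();
    size_t len = 4 * page_size;

    char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (src == MAP_FAILED)
        return 1;
    memset(src, 'a', len);

    /* Move the pages but keep the source VMA mapped; for MREMAP_DONTUNMAP
     * the old and new sizes must match and the mapping must be private
     * anonymous memory. */
    char *dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
    if (dst == MAP_FAILED) {
        perror("mremap");       /* EINVAL on kernels before 5.7 */
        return 1;
    }

    /* dst now holds the data; src stays mapped but faults back in as zeroes. */
    printf("dst[0]=%c src[0]=%d\n", dst[0], src[0]);

    munmap(src, len);
    munmap(dst, len);
    return 0;
}
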
mlock2-tests.c
179 unsigned long page_size = getpagesize(); in test_mlock_lock() local
181 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_lock()
186 if (mlock2_(map, 2 * page_size, 0)) { in test_mlock_lock()
187 munmap(map, 2 * page_size); in test_mlock_lock()
194 if (munlock(map, 2 * page_size)) { in test_mlock_lock()
195 munmap(map, 2 * page_size); in test_mlock_lock()
200 munmap(map, 2 * page_size); in test_mlock_lock()
216 unsigned long page_size = getpagesize(); in unlock_onfault_check() local
219 is_vma_lock_on_fault((unsigned long)map + page_size)) { in unlock_onfault_check()
230 unsigned long page_size = getpagesize(); in test_mlock_onfault() local
[all …]
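
mlock2-tests.c locks and unlocks multiples of page_size and checks the per-VMA flags. A small sketch of the MLOCK_ONFAULT path, assuming glibc 2.27+ for the mlock2() wrapper and Linux 4.4+ for the flag:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    unsigned long page_size = getpagesize();
    size_t len = 2 * page_size;

    char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
        return 1;

    /* Lock lazily: only pages that are actually faulted in get pinned. */
    if (mlock2(map, len, MLOCK_ONFAULT)) {
        perror("mlock2");
        munmap(map, len);
        return 1;
    }

    map[0] = 1;                 /* the first page is now resident and locked */

    munlock(map, len);
    munmap(map, len);
    return 0;
}
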
map_fixed_noreplace.c
44 unsigned long flags, addr, size, page_size; in main() local
50 page_size = sysconf(_SC_PAGE_SIZE); in main()
53 size = 5 * page_size; in main()
60 size = 5 * page_size; in main()
66 if (munmap((void *)addr, 5 * page_size) != 0) { in main()
73 addr = base_addr + page_size; in main()
74 size = 3 * page_size; in main()
92 size = 5 * page_size; in main()
110 addr = base_addr + (2 * page_size); in main()
111 size = page_size; in main()
[all …]
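
map_fixed_noreplace.c verifies that MAP_FIXED_NOREPLACE refuses to clobber an existing mapping instead of silently replacing it the way MAP_FIXED would. A compact sketch; the hint address below is an arbitrary assumption for a 64-bit address space, and kernels older than 4.17 ignore the flag entirely:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    unsigned long page_size = sysconf(_SC_PAGE_SIZE);
    void *hint = (void *)(0x10UL << 32);   /* hypothetical free address */

    /* First mapping at the hint should succeed if the range is free. */
    void *a = mmap(hint, 5 * page_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    if (a == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* A second MAP_FIXED_NOREPLACE over the same range must fail with
     * EEXIST on kernels that support the flag (4.17+). */
    void *b = mmap(hint, page_size, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    if (b == MAP_FAILED)
        perror("second mmap (expected to fail)");

    munmap(a, 5 * page_size);
    return 0;
}
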
ksm_tests.c
348 long page_count, int timeout, size_t page_size) in check_ksm_merge() argument
359 map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count); in check_ksm_merge()
363 if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout)) in check_ksm_merge()
369 munmap(map_ptr, page_size * page_count); in check_ksm_merge()
377 munmap(map_ptr, page_size * page_count); in check_ksm_merge()
381 static int check_ksm_unmerge(int merge_type, int mapping, int prot, int timeout, size_t page_size) in check_ksm_unmerge() argument
393 map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count); in check_ksm_unmerge()
397 if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout)) in check_ksm_unmerge()
402 memset(map_ptr + page_size, '+', 1); in check_ksm_unmerge()
411 munmap(map_ptr, page_size * page_count); in check_ksm_unmerge()
[all …]
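
ksm_tests.c fills page_size * page_count buffers with identical bytes and asks KSM to merge them. A minimal sketch of the madvise(MADV_MERGEABLE) side, assuming CONFIG_KSM and that ksmd has been started via /sys/kernel/mm/ksm/run:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page_size = getpagesize();
    size_t page_count = 16;
    size_t len = page_size * page_count;

    char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
        return 1;

    /* Give ksmd identical page contents to deduplicate. */
    memset(map, '*', len);

    /* Offer the range to KSM; actual merging is asynchronous and can be
     * watched in /sys/kernel/mm/ksm/pages_sharing. */
    if (madvise(map, len, MADV_MERGEABLE))
        perror("madvise(MADV_MERGEABLE)");

    sleep(1);
    munmap(map, len);
    return 0;
}
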
uffd-unit-tests.c
194 page_size = default_huge_page_size(); in uffd_setup_environment()
196 page_size = psize(); in uffd_setup_environment()
198 nr_pages = UFFD_TEST_MEM_SIZE / page_size; in uffd_setup_environment()
327 if (test_pin && pin_pages(&args, area_dst, page_size)) in pagemap_test_fork()
365 if (uffd_register(uffd, area_dst, nr_pages * page_size, in uffd_wp_unpopulated_test()
372 wp_range(uffd, (uint64_t)area_dst, page_size, true); in uffd_wp_unpopulated_test()
377 wp_range(uffd, (uint64_t)area_dst, page_size, false); in uffd_wp_unpopulated_test()
382 wp_range(uffd, (uint64_t)area_dst, page_size, true); in uffd_wp_unpopulated_test()
383 if (madvise(area_dst, page_size, MADV_DONTNEED)) in uffd_wp_unpopulated_test()
393 if (madvise(area_dst, page_size, MADV_DONTNEED)) in uffd_wp_unpopulated_test()
[all …]
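
uffd-unit-tests.c registers page_size-granular ranges with userfaultfd and then write-protects or zaps them with madvise(MADV_DONTNEED). A reduced sketch that only opens the descriptor and registers a range for missing-page events, with no fault-handling thread; it assumes the kernel permits unprivileged userfaultfd (or the process has the needed capability):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    size_t page_size = sysconf(_SC_PAGE_SIZE);
    size_t len = 16 * page_size;

    int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (uffd < 0) {
        perror("userfaultfd");
        return 1;
    }

    /* Handshake: tell the kernel which API version/features we expect. */
    struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    if (ioctl(uffd, UFFDIO_API, &api))
        return 1;

    char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (area == MAP_FAILED)
        return 1;

    /* Ask for missing-page events on the range; a real user would now
     * read(uffd, ...) from another thread and resolve each fault with
     * UFFDIO_COPY or UFFDIO_ZEROPAGE. */
    struct uffdio_register reg = {
        .range = { .start = (unsigned long)area, .len = len },
        .mode  = UFFDIO_REGISTER_MODE_MISSING,
    };
    if (ioctl(uffd, UFFDIO_REGISTER, &reg))
        perror("UFFDIO_REGISTER");

    munmap(area, len);
    close(uffd);
    return 0;
}
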
memfd_secret.c
36 static unsigned long page_size; variable
64 if (len % page_size != 0) in test_mlock_limit()
65 len = (len/page_size) * page_size; in test_mlock_limit()
97 mem = mmap(NULL, page_size, prot, mode, fd, 0); in test_vmsplice()
107 memset(mem, PATTERN, page_size); in test_vmsplice()
110 iov.iov_len = page_size; in test_vmsplice()
118 munmap(mem, page_size); in test_vmsplice()
222 mem = mmap(NULL, page_size, prot, mode, fd, 0); in test_remote_access()
228 memset(mem, PATTERN, page_size); in test_remote_access()
273 page_size = sysconf(_SC_PAGE_SIZE); in prepare()
[all …]
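
memfd_secret.c maps page_size chunks of a secret-memory file that is removed from the kernel direct map and invisible to other processes. A hedged sketch, assuming Linux 5.14+ with secretmem enabled at boot and libc headers that define SYS_memfd_secret:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    unsigned long page_size = sysconf(_SC_PAGE_SIZE);

    /* SYS_memfd_secret is an assumption about the installed headers; the
     * call fails with ENOSYS if secretmem is compiled out or disabled. */
    int fd = syscall(SYS_memfd_secret, 0);
    if (fd < 0) {
        perror("memfd_secret");
        return 1;
    }

    if (ftruncate(fd, page_size))
        return 1;

    /* Secret pages are locked and count against RLIMIT_MEMLOCK. */
    char *mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
    if (mem == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(mem, 0x5a, page_size);

    munmap(mem, page_size);
    close(fd);
    return 0;
}
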
pagemap_ioctl.c
37 int page_size; variable
41 #define LEN(region) ((region.end - region.start)/page_size)
190 mem_size = num_pages * page_size; in userfaultfd_tests()
210 vec_size = mem_size/page_size; in userfaultfd_tests()
245 mem_size = num_pages * page_size; in sanity_tests_sd()
318 for (i = 0; i < mem_size; i += 2 * page_size) in sanity_tests_sd()
326 ksft_test_result(ret == mem_size/(page_size * 2), in sanity_tests_sd()
352 for (i = 0; i < mem_size; i += 2 * page_size) in sanity_tests_sd()
354 mem[(mem_size/page_size - 1) * page_size]++; in sanity_tests_sd()
373 vec_size = mem_size/page_size; in sanity_tests_sd()
[all …]
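
pagemap_ioctl.c drives the newer PAGEMAP_SCAN interface; as a simpler illustration of how page_size indexes per-page kernel state, the classic read() path through /proc/self/pagemap looks like this (a deliberate substitute, not the ioctl the selftest uses):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Return the raw pagemap entry for the page containing vaddr, or 0 on error. */
static uint64_t pagemap_entry(uintptr_t vaddr)
{
    long page_size = sysconf(_SC_PAGE_SIZE);
    uint64_t entry = 0;
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd < 0)
        return 0;

    /* One 64-bit entry per virtual page: offset = (vaddr / page_size) * 8. */
    off_t offset = (off_t)(vaddr / page_size) * sizeof(entry);
    if (pread(fd, &entry, sizeof(entry), offset) != (ssize_t)sizeof(entry))
        entry = 0;

    close(fd);
    return entry;
}

int main(void)
{
    int x = 42;
    uint64_t e = pagemap_entry((uintptr_t)&x);

    /* Bit 63 = page present; the PFN bits read back as 0 without CAP_SYS_ADMIN. */
    printf("present=%llu\n", (unsigned long long)(e >> 63));
    return 0;
}
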
mremap_test.c
60 #define PTE page_size
239 static void mremap_expand_merge(FILE *maps_fp, unsigned long page_size) in mremap_expand_merge() argument
245 start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, in mremap_expand_merge()
253 munmap(start + page_size, page_size); in mremap_expand_merge()
254 remap = mremap(start, page_size, 2 * page_size, 0); in mremap_expand_merge()
257 munmap(start, page_size); in mremap_expand_merge()
258 munmap(start + 2 * page_size, page_size); in mremap_expand_merge()
263 (unsigned long)(start + 3 * page_size)); in mremap_expand_merge()
264 munmap(start, 3 * page_size); in mremap_expand_merge()
278 static void mremap_expand_merge_offset(FILE *maps_fp, unsigned long page_size) in mremap_expand_merge_offset() argument
[all …]
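
mremap_expand_merge() above maps three pages, unmaps the middle one, and grows the first VMA back into the hole so the kernel can merge it with its neighbour. A standalone sketch of that sequence; the merge itself would be confirmed by parsing /proc/self/maps, which is omitted here:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    unsigned long page_size = getpagesize();

    /* Three pages, then punch out the middle one. */
    char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (start == MAP_FAILED)
        return 1;
    munmap(start + page_size, page_size);

    /* Grow the first VMA in place into the hole (flags == 0 means "do not
     * move"); if prot/flags match, the kernel may merge it with the third
     * page's VMA into one region. */
    if (mremap(start, page_size, 2 * page_size, 0) == MAP_FAILED) {
        perror("mremap");
        munmap(start, page_size);
        munmap(start + 2 * page_size, page_size);
        return 1;
    }

    munmap(start, 3 * page_size);
    return 0;
}
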
seal_elf.c
46 unsigned long page_size = getpagesize(); in seal_support() local
48 ptr = mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); in seal_support()
52 ret = sys_mseal(ptr, page_size); in seal_support()
69 unsigned long page_size = getpagesize(); in test_seal_elf() local
76 if (((unsigned long long)ptr % page_size) != 0) in test_seal_elf()
77 ptr = (unsigned long long)ptr & ~(page_size - 1); in test_seal_elf()
81 ret = sys_mprotect((void *)ptr, page_size, PROT_READ|PROT_WRITE); in test_seal_elf()
85 ret = sys_mprotect((void *)ptr, page_size, PROT_READ); in test_seal_elf()
115 ret = sys_mprotect((void *)ptr, page_size, PROT_READ | PROT_WRITE); in test_seal_elf()
/linux-6.12.1/tools/testing/selftests/kvm/
guest_memfd_test.c
37 static void test_mmap(int fd, size_t page_size) in test_mmap() argument
41 mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); in test_mmap()
45 static void test_file_size(int fd, size_t page_size, size_t total_size) in test_file_size() argument
53 TEST_ASSERT_EQ(sb.st_blksize, page_size); in test_file_size()
56 static void test_fallocate(int fd, size_t page_size, size_t total_size) in test_fallocate() argument
64 page_size - 1, page_size); in test_fallocate()
67 ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size, page_size); in test_fallocate()
70 ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size + page_size, page_size); in test_fallocate()
74 total_size, page_size); in test_fallocate()
78 total_size + page_size, page_size); in test_fallocate()
[all …]
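
guest_memfd_test.c asserts that fallocate() on a KVM guest_memfd only accepts page_size-aligned offsets and lengths. Creating a guest_memfd needs a VM fd, so the sketch below shows the same FALLOC_FL_KEEP_SIZE / FALLOC_FL_PUNCH_HOLE flags on an ordinary temporary file instead (a substitution; the underlying filesystem must support hole punching):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    long page_size = sysconf(_SC_PAGE_SIZE);
    char path[] = "/tmp/falloc-XXXXXX";    /* hypothetical scratch file */
    int fd = mkstemp(path);

    if (fd < 0)
        return 1;
    unlink(path);

    if (ftruncate(fd, 4 * page_size))
        return 1;

    /* Preallocate one page past EOF without growing the file size. */
    if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 4 * page_size, page_size))
        perror("fallocate(KEEP_SIZE)");

    /* Punch a page-sized hole; PUNCH_HOLE must be paired with KEEP_SIZE. */
    if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  page_size, page_size))
        perror("fallocate(PUNCH_HOLE)");

    close(fd);
    return 0;
}
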
/linux-6.12.1/tools/testing/selftests/mincore/
mincore_selftest.c
33 int page_size; in TEST() local
37 page_size = sysconf(_SC_PAGESIZE); in TEST()
45 retval = mincore(NULL, page_size, vec); in TEST()
50 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
58 retval = mincore(addr + 1, page_size, vec); in TEST()
70 retval = mincore(addr, page_size, NULL); in TEST()
73 munmap(addr, page_size); in TEST()
88 int page_size; in TEST() local
90 page_size = sysconf(_SC_PAGESIZE); in TEST()
94 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
[all …]
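
mincore_selftest.c feeds page_size-sized ranges to mincore() and checks both the error paths and the residency vector. A minimal sketch of the happy path:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page_size = sysconf(_SC_PAGESIZE);
    size_t pages = 4;
    unsigned char vec[4];

    char *addr = mmap(NULL, pages * page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED)
        return 1;

    /* Touch only the first page so exactly one page becomes resident. */
    addr[0] = 1;

    /* One status byte per page; bit 0 set means "resident in memory". */
    if (mincore(addr, pages * page_size, vec)) {
        perror("mincore");
        return 1;
    }

    for (size_t i = 0; i < pages; i++)
        printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

    munmap(addr, pages * page_size);
    return 0;
}
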
/linux-6.12.1/tools/testing/selftests/lsm/
lsm_get_self_attr_test.c
29 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
30 struct lsm_ctx *ctx = calloc(page_size, 1); in TEST()
42 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
43 __u32 size = page_size; in TEST()
58 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
59 struct lsm_ctx *ctx = calloc(page_size, 1); in TEST()
77 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
78 struct lsm_ctx *ctx = calloc(page_size, 1); in TEST()
79 __u64 *syscall_lsms = calloc(page_size, 1); in TEST()
86 size = page_size; in TEST()
[all …]
lsm_list_modules_test.c
20 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
21 __u64 *syscall_lsms = calloc(page_size, 1); in TEST()
33 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
34 __u32 size = page_size; in TEST()
44 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
45 __u64 *syscall_lsms = calloc(page_size, 1); in TEST()
59 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
60 __u64 *syscall_lsms = calloc(page_size, 1); in TEST()
61 __u32 size = page_size; in TEST()
67 ASSERT_EQ(page_size, size); in TEST()
[all …]
lsm_set_self_attr_test.c
26 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
27 struct lsm_ctx *ctx = calloc(page_size, 1); in TEST()
28 __u32 size = page_size; in TEST()
42 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
43 struct lsm_ctx *ctx = calloc(page_size, 1); in TEST()
44 __u32 size = page_size; in TEST()
58 const long page_size = sysconf(_SC_PAGESIZE); in TEST() local
59 char *ctx = calloc(page_size, 1); in TEST()
60 __u32 size = page_size; in TEST()
/linux-6.12.1/drivers/infiniband/hw/mlx5/
mem.c
40 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
45 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
63 unsigned long page_size; in __mlx5_umem_find_best_quantized_pgoff() local
66 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
67 if (!page_size) in __mlx5_umem_find_best_quantized_pgoff()
77 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
78 while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) { in __mlx5_umem_find_best_quantized_pgoff()
79 page_size /= 2; in __mlx5_umem_find_best_quantized_pgoff()
80 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
87 if (!(pgsz_bitmap & page_size)) in __mlx5_umem_find_best_quantized_pgoff()
[all …]
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/
mmap.c
12 long page_size = sysconf(_SC_PAGE_SIZE); in roundup_page() local
13 return (sz + page_size - 1) / page_size * page_size; in roundup_page()
21 const long page_size = sysconf(_SC_PAGE_SIZE); in test_mmap() local
36 err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size); in test_mmap()
42 4 * (page_size / sizeof(u64))); in test_mmap()
55 tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0); in test_mmap()
57 munmap(tmp1, page_size); in test_mmap()
61 tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0); in test_mmap()
200 tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, in test_mmap()
206 tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, in test_mmap()
[all …]
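
The roundup_page() helper above exists because mmap-able BPF maps (BPF_F_MMAPABLE arrays, ring buffers) are sized in whole pages. The same arithmetic in isolation:

#include <stdio.h>
#include <unistd.h>

/* Round sz up to the next multiple of the system page size. */
static long roundup_page(long sz)
{
    long page_size = sysconf(_SC_PAGE_SIZE);

    return (sz + page_size - 1) / page_size * page_size;
}

int main(void)
{
    /* With 4 KiB pages this prints 4096 4096 8192. */
    printf("%ld %ld %ld\n", roundup_page(1), roundup_page(4096), roundup_page(4097));
    return 0;
}
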
ringbuf.c
92 int page_size = getpagesize(); in ringbuf_write_subtest() local
108 mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0); in ringbuf_write_subtest()
112 ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw"); in ringbuf_write_subtest()
147 int page_size = getpagesize(); in ringbuf_subtest() local
157 skel->maps.ringbuf.max_entries = page_size; in ringbuf_subtest()
165 mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0); in ringbuf_subtest()
167 tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE); in ringbuf_subtest()
170 ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect"); in ringbuf_subtest()
171 ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw"); in ringbuf_subtest()
174 mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size); in ringbuf_subtest()
[all …]
/linux-6.12.1/drivers/pci/endpoint/
pci-epc-mem.c
26 unsigned int page_shift = ilog2(mem->window.page_size); in pci_epc_mem_get_order()
54 size_t page_size; in pci_epc_multi_mem_init() local
70 page_size = windows[i].page_size; in pci_epc_multi_mem_init()
71 if (page_size < PAGE_SIZE) in pci_epc_multi_mem_init()
72 page_size = PAGE_SIZE; in pci_epc_multi_mem_init()
73 page_shift = ilog2(page_size); in pci_epc_multi_mem_init()
94 mem->window.page_size = page_size; in pci_epc_multi_mem_init()
129 size_t size, size_t page_size) in pci_epc_mem_init() argument
135 mem_window.page_size = page_size; in pci_epc_mem_init()
192 align_size = ALIGN(size, mem->window.page_size); in pci_epc_mem_alloc_addr()
[all …]
/linux-6.12.1/tools/testing/selftests/powerpc/primitives/
load_unaligned_zeropad.c
38 static int page_size; variable
43 if (mprotect(mem_region + page_size, page_size, PROT_NONE)) { in protect_region()
53 if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) { in unprotect_region()
125 page_size = getpagesize(); in test_body()
126 mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_body()
131 for (i = 0; i < page_size; i++) in test_body()
134 memset(mem_region+page_size, 0, page_size); in test_body()
138 for (i = 0; i < page_size; i++) in test_body()
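
load_unaligned_zeropad.c builds a two-page region and turns the second page into a PROT_NONE guard so that loads crossing the boundary fault. The generic guard-page setup, independent of the powerpc fixup being exercised:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page_size = getpagesize();

    /* Data in the first page, a PROT_NONE guard as the second. */
    char *region = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (region == MAP_FAILED)
        return 1;

    memset(region, 'x', page_size);

    if (mprotect(region + page_size, page_size, PROT_NONE)) {
        perror("mprotect");
        return 1;
    }

    /* Reads up to the page boundary are fine ... */
    printf("last byte: %c\n", region[page_size - 1]);

    /* ... while touching region[page_size] would raise SIGSEGV, which is
     * exactly the boundary the powerpc test probes. */
    munmap(region, 2 * page_size);
    return 0;
}
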
/linux-6.12.1/arch/powerpc/mm/
init_64.c
188 unsigned long page_size) in altmap_cross_boundary() argument
190 unsigned long nr_pfn = page_size / sizeof(struct page); in altmap_cross_boundary()
206 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; in __vmemmap_populate() local
209 start = ALIGN_DOWN(start, page_size); in __vmemmap_populate()
213 for (; start < end; start += page_size) { in __vmemmap_populate()
223 if (vmemmap_populated(start, page_size)) in __vmemmap_populate()
231 if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { in __vmemmap_populate()
232 p = vmemmap_alloc_block_buf(page_size, node, altmap); in __vmemmap_populate()
239 p = vmemmap_alloc_block_buf(page_size, node, NULL); in __vmemmap_populate()
252 int nr_pfns = page_size >> PAGE_SHIFT; in __vmemmap_populate()
[all …]
/linux-6.12.1/drivers/misc/
vmw_balloon.c
245 enum vmballoon_page_size_type page_size; member
565 unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size) in vmballoon_page_order() argument
567 return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0; in vmballoon_page_order()
577 vmballoon_page_in_frames(enum vmballoon_page_size_type page_size) in vmballoon_page_in_frames() argument
579 return 1 << vmballoon_page_order(page_size); in vmballoon_page_in_frames()
589 enum vmballoon_page_size_type page_size) in vmballoon_mark_page_offline() argument
593 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) in vmballoon_mark_page_offline()
604 enum vmballoon_page_size_type page_size) in vmballoon_mark_page_online() argument
608 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) in vmballoon_mark_page_online()
668 if (ctl->page_size == VMW_BALLOON_2M_PAGE) in vmballoon_alloc_page_list()
[all …]
/linux-6.12.1/tools/testing/selftests/kvm/lib/s390x/
processor.c
17 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", in virt_arch_pgd_alloc()
18 vm->page_size); in virt_arch_pgd_alloc()
26 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_arch_pgd_alloc()
43 memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_alloc_region()
55 TEST_ASSERT((gva % vm->page_size) == 0, in virt_arch_pg_map()
58 gva, vm->page_size); in virt_arch_pg_map()
63 TEST_ASSERT((gpa % vm->page_size) == 0, in virt_arch_pg_map()
66 gva, vm->page_size); in virt_arch_pg_map()
70 gva, vm->max_gfn, vm->page_size); in virt_arch_pg_map()
94 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", in addr_arch_gva2gpa()
[all …]
/linux-6.12.1/drivers/accel/habanalabs/common/mmu/
mmu.c
33 return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, in hl_is_dram_va()
176 u32 page_size, u32 *real_page_size, bool is_dram_addr) in hl_mmu_get_real_page_size() argument
182 if ((page_size % mmu_prop->page_size) == 0) { in hl_mmu_get_real_page_size()
183 *real_page_size = mmu_prop->page_size; in hl_mmu_get_real_page_size()
188 page_size, mmu_prop->page_size >> 10); in hl_mmu_get_real_page_size()
193 static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size, in hl_mmu_get_prop() argument
200 else if ((page_size % prop->pmmu_huge.page_size) == 0) in hl_mmu_get_prop()
227 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte) in hl_mmu_unmap_page() argument
241 mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr); in hl_mmu_unmap_page()
246 rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size, in hl_mmu_unmap_page()
[all …]
/linux-6.12.1/drivers/accel/habanalabs/common/
memory.c
29 static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size) in set_alloc_page_size() argument
38 if (prop->supports_user_set_page_size && args->alloc.page_size) { in set_alloc_page_size()
39 psize = args->alloc.page_size; in set_alloc_page_size()
49 *page_size = psize; in set_alloc_page_size()
94 u32 num_curr_pgs, page_size; in alloc_device_memory() local
100 rc = set_alloc_page_size(hdev, args, &page_size); in alloc_device_memory()
104 num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size); in alloc_device_memory()
105 total_size = num_pgs * page_size; in alloc_device_memory()
115 if (is_power_of_2(page_size)) in alloc_device_memory()
117 total_size, NULL, page_size); in alloc_device_memory()
[all …]
