/linux-6.12.1/tools/testing/selftests/mm/

D | mremap_dontunmap.c |
     45  unsigned long num_pages = 1;    in kernel_support_for_mremap_dontunmap() local
     46  void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE,    in kernel_support_for_mremap_dontunmap()
     53  mremap(source_mapping, num_pages * page_size, num_pages * page_size,    in kernel_support_for_mremap_dontunmap()
     58  BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,    in kernel_support_for_mremap_dontunmap()
     62  BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,    in kernel_support_for_mremap_dontunmap()
     78  unsigned long num_pages = size / page_size;    in check_region_contains_byte() local
     82  for (i = 0; i < num_pages; ++i) {    in check_region_contains_byte()
     97  unsigned long num_pages = 5;    in mremap_dontunmap_simple() local
    100  mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,    in mremap_dontunmap_simple()
    104  memset(source_mapping, 'a', num_pages * page_size);    in mremap_dontunmap_simple()
  [all …]
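Note: the hits above are the selftest's feature probe — it maps one PROT_NONE page and sees whether mremap() accepts MREMAP_DONTUNMAP. A minimal standalone sketch of the same probe (my reconstruction, not the selftest itself; assumes Linux 5.7+, where the flag first appeared):

/* probe.c — build with: cc -o probe probe.c */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4	/* value from <linux/mman.h>, for older libc headers */
#endif

int main(void)
{
	unsigned long page_size = sysconf(_SC_PAGESIZE);
	unsigned long num_pages = 1;

	void *src = mmap(NULL, num_pages * page_size, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED)
		return 1;

	/* MREMAP_DONTUNMAP requires MREMAP_MAYMOVE. */
	void *dst = mremap(src, num_pages * page_size, num_pages * page_size,
			   MREMAP_MAYMOVE | MREMAP_DONTUNMAP, NULL);

	printf("MREMAP_DONTUNMAP %ssupported\n",
	       dst == MAP_FAILED ? "not " : "");

	if (dst != MAP_FAILED)
		munmap(dst, num_pages * page_size);
	munmap(src, num_pages * page_size);
	return 0;
}

On older kernels the mremap() call fails with EINVAL, which is exactly the signal kernel_support_for_mremap_dontunmap() keys off.
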
/linux-6.12.1/arch/s390/include/asm/

D | page-states.h |
     38  static __always_inline void __set_page_state(void *addr, unsigned long num_pages, unsigned char cmd)    in __set_page_state() argument
     42  while (num_pages--) {    in __set_page_state()
     48  static inline void __set_page_unused(void *addr, unsigned long num_pages)    in __set_page_unused() argument
     50  __set_page_state(addr, num_pages, ESSA_SET_UNUSED);    in __set_page_unused()
     53  static inline void __set_page_stable_dat(void *addr, unsigned long num_pages)    in __set_page_stable_dat() argument
     55  __set_page_state(addr, num_pages, ESSA_SET_STABLE);    in __set_page_stable_dat()
     58  static inline void __set_page_stable_nodat(void *addr, unsigned long num_pages)    in __set_page_stable_nodat() argument
     60  __set_page_state(addr, num_pages, ESSA_SET_STABLE_NODAT);    in __set_page_stable_nodat()
     63  static inline void __arch_set_page_nodat(void *addr, unsigned long num_pages)    in __arch_set_page_nodat() argument
     68  __set_page_stable_dat(addr, num_pages);    in __arch_set_page_nodat()
  [all …]
/linux-6.12.1/drivers/infiniband/hw/qib/

D | qib_user_pages.c |
     40  static void __qib_release_user_pages(struct page **p, size_t num_pages,    in __qib_release_user_pages() argument
     43  unpin_user_pages_dirty_lock(p, num_pages, dirty);    in __qib_release_user_pages()
     94  int qib_get_user_pages(unsigned long start_page, size_t num_pages,    in qib_get_user_pages() argument
    102  locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);    in qib_get_user_pages()
    110  for (got = 0; got < num_pages; got += ret) {    in qib_get_user_pages()
    112  num_pages - got,    in qib_get_user_pages()
    126  atomic64_sub(num_pages, &current->mm->pinned_vm);    in qib_get_user_pages()
    130  void qib_release_user_pages(struct page **p, size_t num_pages)    in qib_release_user_pages() argument
    132  __qib_release_user_pages(p, num_pages, 1);    in qib_release_user_pages()
    136  atomic64_sub(num_pages, &current->mm->pinned_vm);    in qib_release_user_pages()
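Note: lines 110-112 show the usual partial-progress pinning loop — the get/pin_user_pages family may pin fewer pages than requested, so the driver retries from where it left off and unwinds on error. A hedged kernel-context sketch of the same shape (mine, not qib's code; it uses pin_user_pages_fast(), which takes mmap_lock itself):

#include <linux/errno.h>
#include <linux/mm.h>

/* Pin num_pages user pages starting at start; all-or-nothing. */
static int pin_range(unsigned long start, unsigned long num_pages,
		     struct page **pages)
{
	unsigned long got;
	int ret;

	for (got = 0; got < num_pages; got += ret) {
		ret = pin_user_pages_fast(start + got * PAGE_SIZE,
					  num_pages - got, FOLL_WRITE,
					  pages + got);
		if (ret <= 0) {
			/* Release the partial pin before bailing out. */
			unpin_user_pages(pages, got);
			return ret ? ret : -EFAULT;
		}
	}
	return 0;
}
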
/linux-6.12.1/drivers/gpu/drm/ttm/

D | ttm_pool.c |
    168  unsigned int num_pages = last - first;    in ttm_pool_apply_caching() local
    170  if (!num_pages)    in ttm_pool_apply_caching()
    177  return set_pages_array_wc(first, num_pages);    in ttm_pool_apply_caching()
    179  return set_pages_array_uc(first, num_pages);    in ttm_pool_apply_caching()
    214  unsigned int num_pages)    in ttm_pool_unmap() argument
    220  dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,    in ttm_pool_unmap()
    227  unsigned int i, num_pages = 1 << pt->order;    in ttm_pool_type_give() local
    229  for (i = 0; i < num_pages; ++i) {    in ttm_pool_type_give()
    324  unsigned int num_pages;    in ttm_pool_shrink() local
    336  num_pages = 1 << pt->order;    in ttm_pool_shrink()
  [all …]
D | ttm_tt.c |
    115  ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);    in ttm_tt_alloc_page_directory()
    124  ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +    in ttm_dma_tt_alloc_page_directory()
    129  ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);    in ttm_dma_tt_alloc_page_directory()
    135  ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),    in ttm_sg_tt_alloc_page_directory()
    155  ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;    in ttm_tt_init_fields()
    228  for (i = 0; i < ttm->num_pages; ++i) {    in ttm_tt_swapin()
    269  loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;    in ttm_tt_swapout()
    285  for (i = 0; i < ttm->num_pages; ++i) {    in ttm_tt_swapout()
    305  return ttm->num_pages;    in ttm_tt_swapout()
    326  atomic_long_add(ttm->num_pages, &ttm_pages_allocated);    in ttm_tt_populate()
  [all …]
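Note: the hits at lines 124-129 show a single-allocation trick — one kvcalloc() is sized for two arrays at once, and dma_address is carved out right past the page pointers, so both share one lifetime and one free. A runnable userspace analogue (my sketch; dma_addr_t here is a stand-in typedef, not the kernel type):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel's dma_addr_t */

int main(void)
{
	size_t num_pages = 4;

	/* One calloc sized for both arrays, like the kvcalloc in TTM. */
	void **pages = calloc(num_pages, sizeof(void *) + sizeof(dma_addr_t));
	if (!pages)
		return 1;

	/* The second array starts right past the first one. */
	dma_addr_t *dma_address = (dma_addr_t *)(pages + num_pages);

	dma_address[0] = 0x1000;	/* both arrays share one lifetime... */
	printf("%llx\n", (unsigned long long)dma_address[0]);

	free(pages);			/* ...and one free() releases both */
	return 0;
}
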
/linux-6.12.1/drivers/gpu/drm/vmwgfx/

D | vmwgfx_gmr.c |
     39  unsigned long num_pages,    in vmw_gmr2_bind() argument
     47  uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);    in vmw_gmr2_bind()
     48  uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;    in vmw_gmr2_bind()
     58  define_cmd.numPages = num_pages;    in vmw_gmr2_bind()
     73  while (num_pages > 0) {    in vmw_gmr2_bind()
     74  unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);    in vmw_gmr2_bind()
     94  num_pages -= nr;    in vmw_gmr2_bind()
    128  unsigned long num_pages,    in vmw_gmr_bind() argument
    141  return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);    in vmw_gmr_bind()
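Note: line 47 is a hand-rolled ceiling division (equivalent to the kernel's DIV_ROUND_UP()), and the loop at line 73 walks the range in bounded chunks. A runnable sketch of both idioms (mine; PER_REMAP is an illustrative stand-in, not the real VMW_PPN_PER_REMAP value):

#include <assert.h>
#include <stdio.h>

#define PER_REMAP 514	/* illustrative chunk size only */

int main(void)
{
	unsigned long num_pages = 1200;

	/* Hand-rolled ceiling division, as in vmw_gmr2_bind()... */
	unsigned long remap_num = num_pages / PER_REMAP +
				  ((num_pages % PER_REMAP) > 0);
	/* ...equivalent to DIV_ROUND_UP(num_pages, PER_REMAP): */
	assert(remap_num == (num_pages + PER_REMAP - 1) / PER_REMAP);

	/* Consume the range at most PER_REMAP pages at a time. */
	while (num_pages > 0) {
		unsigned long nr =
			num_pages < PER_REMAP ? num_pages : PER_REMAP;
		printf("chunk of %lu pages\n", nr);
		num_pages -= nr;
	}
	return 0;
}
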
D | vmwgfx_ttm_buffer.c |
     70  return ++(viter->i) < viter->num_pages;    in __vmw_piter_non_sg_next()
    107  viter->num_pages = vsgt->num_pages;    in vmw_piter_start()
    184  vsgt->num_pages = vmw_tt->dma_ttm.num_pages;    in vmw_ttm_map_dma()
    196  vsgt->pages, vsgt->num_pages, 0,    in vmw_ttm_map_dma()
    197  (unsigned long)vsgt->num_pages << PAGE_SHIFT,    in vmw_ttm_map_dma()
    296  ttm->num_pages, vmw_be->gmr_id);    in vmw_ttm_bind()
    301  vmw_mob_create(ttm->num_pages);    in vmw_ttm_bind()
    307  &vmw_be->vsgt, ttm->num_pages,    in vmw_ttm_bind()
    373  ttm->num_pages);    in vmw_ttm_populate()
/linux-6.12.1/drivers/gpu/drm/xen/

D | xen_drm_front_gem.c |
     29  size_t num_pages;    member
     48  xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);    in gem_alloc_pages_array()
     49  xen_obj->pages = kvmalloc_array(xen_obj->num_pages,    in gem_alloc_pages_array()
     93  ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);    in xen_drm_front_gem_object_mmap()
    159  ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,    in gem_create()
    163  xen_obj->num_pages, ret);    in gem_create()
    175  xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);    in gem_create()
    212  xen_free_unpopulated_pages(xen_obj->num_pages,    in xen_drm_front_gem_free_object_unlocked()
    240  xen_obj->pages, xen_obj->num_pages);    in xen_drm_front_gem_get_sg_table()
    265  xen_obj->num_pages);    in xen_drm_front_gem_import_sg_table()
  [all …]
/linux-6.12.1/drivers/xen/

D | xen-front-pgdir-shbuf.c |
    160  return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);    in get_num_pages_dir()
    187  buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;    in guest_calc_num_grefs()
    208  unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),    in backend_unmap()
    213  for (i = 0; i < buf->num_pages; i++) {    in backend_unmap()
    222  buf->num_pages);    in backend_unmap()
    224  for (i = 0; i < buf->num_pages; i++) {    in backend_unmap()
    253  map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);    in backend_map()
    257  buf->backend_map_handles = kcalloc(buf->num_pages,    in backend_map()
    271  grefs_left = buf->num_pages;    in backend_map()
    295  ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);    in backend_map()
  [all …]
/linux-6.12.1/tools/testing/scatterlist/

D | main.c |
     10  unsigned num_pages;    member
     40  printf("%u input PFNs:", test->num_pages);    in fail()
     41  for (i = 0; i < test->num_pages; i++)    in fail()
     87  int left_pages = test->pfn_app ? test->num_pages : 0;    in main()
     92  set_pages(pages, test->pfn, test->num_pages);    in main()
     96  &append, pages, test->num_pages, 0, test->size,    in main()
    100  &append.sgt, pages, test->num_pages, 0,    in main()
    109  set_pages(pages, test->pfn_app, test->num_pages);    in main()
    111  &append, pages, test->num_pages, 0, test->size,    in main()
/linux-6.12.1/drivers/media/common/videobuf2/

D | videobuf2-dma-sg.c |
     49  unsigned int num_pages;    member
    107  int num_pages;    in vb2_dma_sg_alloc() local
    121  buf->num_pages = size >> PAGE_SHIFT;    in vb2_dma_sg_alloc()
    129  buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);    in vb2_dma_sg_alloc()
    138  buf->num_pages, 0, size, GFP_KERNEL);    in vb2_dma_sg_alloc()
    162  __func__, buf->num_pages);    in vb2_dma_sg_alloc()
    169  num_pages = buf->num_pages;    in vb2_dma_sg_alloc()
    170  while (num_pages--)    in vb2_dma_sg_alloc()
    171  __free_page(buf->pages[num_pages]);    in vb2_dma_sg_alloc()
    183  int i = buf->num_pages;    in vb2_dma_sg_put()
  [all …]
/linux-6.12.1/arch/x86/platform/efi/

D | memmap.c |
    129  end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;    in efi_memmap_split_count()
    194  end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;    in efi_memmap_insert()
    203  md->num_pages = (m_end - md->phys_addr + 1) >>    in efi_memmap_insert()
    210  md->num_pages = (end - md->phys_addr + 1) >>    in efi_memmap_insert()
    216  md->num_pages = (m_start - md->phys_addr) >>    in efi_memmap_insert()
    224  md->num_pages = (m_end - m_start + 1) >>    in efi_memmap_insert()
    231  md->num_pages = (end - m_end) >>    in efi_memmap_insert()
    238  md->num_pages = (m_start - md->phys_addr) >>    in efi_memmap_insert()
    245  md->num_pages = (end - md->phys_addr + 1) >>    in efi_memmap_insert()
D | efi.c |
    130  unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;    in do_add_efi_memmap()
    245  u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;    in efi_memmap_entry_valid()
    249  if (md->num_pages == 0) {    in efi_memmap_entry_valid()
    251  } else if (md->num_pages > EFI_PAGES_MAX ||    in efi_memmap_entry_valid()
    252  EFI_PAGES_MAX - md->num_pages <    in efi_memmap_entry_valid()
    254  end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)    in efi_memmap_entry_valid()
    336  size = md->num_pages << EFI_PAGE_SHIFT;    in efi_remove_e820_mmio()
    364  md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,    in efi_print_memmap()
    365  (md->num_pages >> (20 - EFI_PAGE_SHIFT)));    in efi_print_memmap()
    549  prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;    in efi_merge_regions()
  [all …]
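Note: an EFI memory descriptor counts 4 KiB EFI pages (EFI_PAGE_SHIFT is 12 regardless of the CPU page size), so region sizes and end addresses derive from num_pages as in the hits above, and efi_memmap_entry_valid() rejects counts whose span would wrap past the top of the address space. A runnable sketch of that arithmetic (mine, with a trimmed stand-in struct, not the kernel's efi_memory_desc_t):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12
#define EFI_PAGES_MAX  (UINT64_MAX >> EFI_PAGE_SHIFT)

struct efi_memory_desc {	/* trimmed stand-in */
	uint64_t phys_addr;
	uint64_t num_pages;
};

int main(void)
{
	struct efi_memory_desc md = { .phys_addr = 0x100000, .num_pages = 256 };

	/* Reject descriptors whose span would wrap past 2^64. */
	if (md.num_pages == 0 || md.num_pages > EFI_PAGES_MAX ||
	    EFI_PAGES_MAX - md.num_pages < (md.phys_addr >> EFI_PAGE_SHIFT)) {
		fprintf(stderr, "invalid descriptor\n");
		return 1;
	}

	uint64_t size = md.num_pages << EFI_PAGE_SHIFT;
	uint64_t end = md.phys_addr + size - 1;	/* inclusive last byte */
	printf("[0x%llx-0x%llx] %llu KiB\n",
	       (unsigned long long)md.phys_addr, (unsigned long long)end,
	       (unsigned long long)(size / 1024));
	return 0;
}
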
D | efi_32.c |
     43  size = md->num_pages << PAGE_SHIFT;    in efi_map_region()
     51  set_memory_uc((unsigned long)va, md->num_pages);    in efi_map_region()
     82  int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)    in efi_setup_page_tables() argument
    139  set_memory_x(md->virt_addr, md->num_pages);    in efi_runtime_update_mappings()
/linux-6.12.1/net/ceph/

D | pagevec.c |
     13  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)    in ceph_put_page_vector() argument
     17  for (i = 0; i < num_pages; i++) {    in ceph_put_page_vector()
     26  void ceph_release_page_vector(struct page **pages, int num_pages)    in ceph_release_page_vector() argument
     30  for (i = 0; i < num_pages; i++)    in ceph_release_page_vector()
     39  struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)    in ceph_alloc_page_vector() argument
     44  pages = kmalloc_array(num_pages, sizeof(*pages), flags);    in ceph_alloc_page_vector()
     47  for (i = 0; i < num_pages; i++) {    in ceph_alloc_page_vector()
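Note: ceph_alloc_page_vector() is the classic allocate-then-unwind pattern — allocate the pointer array, then each element, and on any per-element failure release everything obtained so far. A userspace analogue (my sketch; videobuf2-dma-sg.c above frees in the same reverse order with its while (num_pages--) loop):

#include <stdlib.h>

/* Returns num_pages buffers of page_size bytes each, or NULL. */
void **alloc_vector(int num_pages, size_t page_size)
{
	void **pages = calloc(num_pages, sizeof(*pages));
	if (!pages)
		return NULL;

	for (int i = 0; i < num_pages; i++) {
		pages[i] = malloc(page_size);
		if (!pages[i]) {
			while (i--)	/* unwind the partial allocation */
				free(pages[i]);
			free(pages);
			return NULL;
		}
	}
	return pages;
}

The reverse-order unwind mirrors ceph_release_page_vector(): the failing index bounds exactly how many elements need freeing.
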
/linux-6.12.1/drivers/tee/

D | tee_shm.c |
     38  unpin_user_pages(shm->pages, shm->num_pages);    in release_registered_pages()
     40  shm_put_kernel_pages(shm->pages, shm->num_pages);    in release_registered_pages()
    210  size_t num_pages,    in tee_dyn_shm_alloc_helper() argument
    240  shm->num_pages = nr_pages;    in tee_dyn_shm_alloc_helper()
    277  size_t num_pages, off;    in register_shm_helper() local
    305  num_pages = iov_iter_npages(iter, INT_MAX);    in register_shm_helper()
    306  if (!num_pages) {    in register_shm_helper()
    311  shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);    in register_shm_helper()
    317  len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,    in register_shm_helper()
    329  shm_get_kernel_pages(shm->pages, num_pages);    in register_shm_helper()
  [all …]
/linux-6.12.1/drivers/infiniband/sw/siw/

D | siw_mem.c |
     68  int i, num_pages = umem->num_pages;    in siw_umem_release() local
     73  for (i = 0; num_pages > 0; i++) {    in siw_umem_release()
     75  num_pages -= PAGES_PER_CHUNK;    in siw_umem_release()
    365  int num_pages, num_chunks, i, rv = 0;    in siw_umem_get() local
    371  num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;    in siw_umem_get()
    372  num_chunks = (num_pages >> CHUNK_SHIFT) + 1;    in siw_umem_get()
    400  for (i = 0; num_pages > 0; i++) {    in siw_umem_get()
    401  int nents = min_t(int, num_pages, PAGES_PER_CHUNK);    in siw_umem_get()
    412  umem->num_pages++;    in siw_umem_get()
    413  num_pages--;    in siw_umem_get()
/linux-6.12.1/drivers/gpu/drm/i915/gem/

D | i915_gem_userptr.c |
     92  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;    in i915_gem_object_userptr_drop_ref() local
     94  unpin_user_pages(pvec, num_pages);    in i915_gem_object_userptr_drop_ref()
    104  unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */    in i915_gem_userptr_get_pages() local
    107  if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))    in i915_gem_userptr_get_pages()
    110  num_pages = obj->base.size >> PAGE_SHIFT;    in i915_gem_userptr_get_pages()
    124  ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,    in i915_gem_userptr_get_pages()
    125  num_pages << PAGE_SHIFT,    in i915_gem_userptr_get_pages()
    235  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;    in i915_gem_object_userptr_submit_init() local
    260  pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);    in i915_gem_object_userptr_submit_init()
    268  while (pinned < num_pages) {    in i915_gem_object_userptr_submit_init()
  [all …]
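Note: line 107 guards the narrowing at line 110 — num_pages there is a plain unsigned int, so i915 first checks that size >> PAGE_SHIFT even fits in the smaller type (overflows_type() is the kernel's generic fits-in-type check). A runnable sketch of the same guard (mine; assumes PAGE_SHIFT == 12):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long long size = 1ULL << 45;	/* a 32 TiB object */
	unsigned long long pages = size >> 12;	/* PAGE_SHIFT == 12 assumed */

	/* Reject counts that unsigned int would silently truncate. */
	if (pages > UINT_MAX) {
		fprintf(stderr, "too many pages for a u32 count\n");
		return 1;
	}

	unsigned int num_pages = (unsigned int)pages;
	printf("%u pages\n", num_pages);
	return 0;
}
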
/linux-6.12.1/arch/riscv/include/asm/

D | set_memory.h |
     21  int num_pages))    in set_kernel_memory() argument
     25  int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;    in set_kernel_memory() local
     27  return set_memory(start, num_pages);    in set_kernel_memory()
     37  int num_pages))    in set_kernel_memory() argument
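Note: line 25 is the standard byte-range-to-page-count conversion — round the span up to a page boundary, then shift down. A runnable sketch (mine; assumes 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long start = 0x1000, end = 0x3005;	/* 2 pages + 5 bytes */

	/* Round the span up, then convert bytes to a page count. */
	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;

	printf("%d pages\n", num_pages);	/* prints 3 */
	return 0;
}
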
/linux-6.12.1/arch/x86/hyperv/

D | hv_proc.c |
     23  int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)    in hv_call_deposit_pages() argument
     36  if (num_pages > HV_DEPOSIT_MAX)    in hv_call_deposit_pages()
     38  if (!num_pages)    in hv_call_deposit_pages()
     56  while (num_pages) {    in hv_call_deposit_pages()
     58  order = 31 - __builtin_clz(num_pages);    in hv_call_deposit_pages()
     74  num_pages -= counts[i];    in hv_call_deposit_pages()
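Note: 31 - __builtin_clz(x) at line 58 is the index of the highest set bit of a 32-bit value, so the loop deposits num_pages as a short series of power-of-two blocks, largest first. A runnable sketch of that decomposition (mine):

#include <stdio.h>

int main(void)
{
	unsigned int num_pages = 1337;

	while (num_pages) {
		/* Largest order with (1 << order) <= num_pages. */
		unsigned int order = 31 - __builtin_clz(num_pages);
		unsigned int count = 1u << order;

		printf("block: order %u (%u pages)\n", order, count);
		num_pages -= count;
	}
	return 0;
}

For 1337 pages this emits orders 10, 8, 5, 4, 3, 0 — six naturally aligned allocations instead of 1337 single pages.
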
/linux-6.12.1/drivers/gpu/drm/gma500/

D | mmu.c |
    479  uint32_t num_pages, uint32_t desired_tile_stride,    in psb_mmu_flush_ptes() argument
    497  rows = num_pages / desired_tile_stride;    in psb_mmu_flush_ptes()
    499  desired_tile_stride = num_pages;    in psb_mmu_flush_ptes()
    527  unsigned long address, uint32_t num_pages)    in psb_mmu_remove_pfn_sequence() argument
    538  end = addr + (num_pages << PAGE_SHIFT);    in psb_mmu_remove_pfn_sequence()
    555  psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);    in psb_mmu_remove_pfn_sequence()
    566  uint32_t num_pages, uint32_t desired_tile_stride,    in psb_mmu_remove_pages() argument
    580  rows = num_pages / desired_tile_stride;    in psb_mmu_remove_pages()
    582  desired_tile_stride = num_pages;    in psb_mmu_remove_pages()
    612  psb_mmu_flush_ptes(pd, f_address, num_pages,    in psb_mmu_remove_pages()
  [all …]
D | mmu.h |
     69  uint32_t num_pages);
     73  uint32_t num_pages, int type);
     78  unsigned long address, uint32_t num_pages,
     82  unsigned long address, uint32_t num_pages,
/linux-6.12.1/drivers/gpu/drm/imagination/

D | pvr_free_list.c |
    223  struct sg_table *sgt, u32 offset, u32 num_pages)    in pvr_free_list_insert_pages_locked() argument
    251  num_pages--;    in pvr_free_list_insert_pages_locked()
    252  if (!num_pages)    in pvr_free_list_insert_pages_locked()
    256  if (!num_pages)    in pvr_free_list_insert_pages_locked()
    281  free_list_node->num_pages;    in pvr_free_list_insert_node_locked()
    290  offset, free_list_node->num_pages);    in pvr_free_list_insert_node_locked()
    292  free_list->current_pages += free_list_node->num_pages;    in pvr_free_list_insert_node_locked()
    298  pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages)    in pvr_free_list_grow() argument
    306  if (num_pages & FREE_LIST_ALIGNMENT) {    in pvr_free_list_grow()
    317  free_list_node->num_pages = num_pages;    in pvr_free_list_grow()
  [all …]
/linux-6.12.1/drivers/gpu/drm/radeon/

D | radeon_ttm.c |
    141  unsigned num_pages;    in radeon_move_blit() local
    178  num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);    in radeon_move_blit()
    179  fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);    in radeon_move_blit()
    341  unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;    in radeon_ttm_tt_pin_userptr()
    349  unsigned num_pages = ttm->num_pages - pinned;    in radeon_ttm_tt_pin_userptr() local
    353  r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,    in radeon_ttm_tt_pin_userptr()
    360  } while (pinned < ttm->num_pages);    in radeon_ttm_tt_pin_userptr()
    362  r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,    in radeon_ttm_tt_pin_userptr()
    363  (u64)ttm->num_pages << PAGE_SHIFT,    in radeon_ttm_tt_pin_userptr()
    373  ttm->num_pages);    in radeon_ttm_tt_pin_userptr()
  [all …]
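Note: line 178 is a unit conversion — radeon's copy engine counts GPU pages, which can be smaller than CPU pages, so the CPU page count is scaled by the ratio. A runnable sketch (mine; I believe RADEON_GPU_PAGE_SIZE is 4096 in the driver, and the 64 KiB PAGE_SIZE below is just an illustrative host configuration):

#include <stdio.h>

#define PAGE_SIZE            65536UL	/* e.g. a 64 KiB-page PowerPC host */
#define RADEON_GPU_PAGE_SIZE 4096UL
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	unsigned long size = 1UL << 20;	/* 1 MiB buffer */

	/* CPU pages covering the buffer, scaled to GPU-page units. */
	unsigned long num_pages =
		PFN_UP(size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	printf("%lu GPU pages\n", num_pages);	/* 16 CPU pages -> 256 */
	return 0;
}
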
/linux-6.12.1/fs/crypto/

D | bio.c |
     56  int num_pages = 0;    in fscrypt_zeroout_range_inline_crypt() local
     66  if (num_pages == 0) {    in fscrypt_zeroout_range_inline_crypt()
     76  num_pages++;    in fscrypt_zeroout_range_inline_crypt()
     80  if (num_pages == BIO_MAX_VECS || !len ||    in fscrypt_zeroout_range_inline_crypt()
     86  num_pages = 0;    in fscrypt_zeroout_range_inline_crypt()
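Note: fscrypt_zeroout_range_inline_crypt() accumulates pages into a bio until it is full (BIO_MAX_VECS) or the range is exhausted, then submits and starts a fresh batch. A runnable sketch of that batching shape (mine; MAX_VECS is shrunk so the flushes are visible):

#include <stdio.h>

#define MAX_VECS 4	/* stand-in for BIO_MAX_VECS (256 in the kernel) */

static void submit(int n) { printf("submit batch of %d\n", n); }

int main(void)
{
	int len = 10;		/* items left to process */
	int num_pages = 0;	/* items in the current batch */

	while (len) {
		num_pages++;	/* add one item to the batch */
		len--;

		/* Flush on a full batch or when the input runs out. */
		if (num_pages == MAX_VECS || !len) {
			submit(num_pages);
			num_pages = 0;
		}
	}
	return 0;
}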