
Searched refs:npages (Results 1 – 25 of 305) sorted by relevance


/linux-6.12.1/tools/testing/selftests/mm/
hmm-tests.c
181 unsigned long npages) in hmm_dmirror_cmd() argument
189 cmd.npages = npages; in hmm_dmirror_cmd()
270 unsigned long npages) in hmm_migrate_sys_to_dev() argument
272 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages); in hmm_migrate_sys_to_dev()
277 unsigned long npages) in hmm_migrate_dev_to_sys() argument
279 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages); in hmm_migrate_dev_to_sys()
295 unsigned long npages; in TEST_F() local
302 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
303 ASSERT_NE(npages, 0); in TEST_F()
304 size = npages << self->page_shift; in TEST_F()
[all …]
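The hits above derive npages by rounding the buffer size up to a whole number of pages and shifting by the page shift. A minimal userspace sketch of that arithmetic (ALIGN mirrors the kernel macro; the 1 MiB buffer size is an illustrative stand-in for HMM_BUFFER_SIZE):

/* Sketch of the npages arithmetic in the selftest above. */
#include <stdio.h>
#include <unistd.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long page_shift = __builtin_ctzl(page_size);
	unsigned long buffer_size = 1 << 20;	/* example: 1 MiB buffer */

	unsigned long npages = ALIGN(buffer_size, page_size) >> page_shift;
	unsigned long size = npages << page_shift;	/* rounded-up bytes */

	printf("npages=%lu size=%lu\n", npages, size);
	return 0;
}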
/linux-6.12.1/drivers/gpu/drm/i915/selftests/
scatterlist.c
53 unsigned int npages = npages_fn(n, pt->st.nents, rnd); in expect_pfn_sg() local
61 if (sg->length != npages * PAGE_SIZE) { in expect_pfn_sg()
63 __func__, who, npages * PAGE_SIZE, sg->length); in expect_pfn_sg()
70 pfn += npages; in expect_pfn_sg()
209 unsigned long npages) in page_contiguous() argument
211 return first + npages == last; in page_contiguous()
242 unsigned long npages = npages_fn(n, count, rnd); in alloc_table() local
246 pfn_to_page(pfn + npages), in alloc_table()
247 npages)) { in alloc_table()
254 sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0); in alloc_table()
[all …]
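page_contiguous() above is a pure PFN check: a run of npages starting at first is contiguous exactly when first + npages equals the PFN one past the last page. A sketch with integer PFNs standing in for the pfn_to_page()/struct page pointers used in the driver:

#include <assert.h>
#include <stdbool.h>

static bool page_contiguous(unsigned long first_pfn, unsigned long last_pfn,
			    unsigned long npages)
{
	return first_pfn + npages == last_pfn;
}

int main(void)
{
	assert(page_contiguous(100, 104, 4));	/* pfns 100..103, then 104 */
	assert(!page_contiguous(100, 105, 4));	/* gap of one page */
	return 0;
}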
/linux-6.12.1/lib/
kunit_iov_iter.c
50 size_t npages) in iov_kunit_create_buffer() argument
56 pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL); in iov_kunit_create_buffer()
60 got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages); in iov_kunit_create_buffer()
61 if (got != npages) { in iov_kunit_create_buffer()
63 KUNIT_ASSERT_EQ(test, got, npages); in iov_kunit_create_buffer()
66 for (int i = 0; i < npages; i++) in iov_kunit_create_buffer()
69 buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL); in iov_kunit_create_buffer()
109 size_t bufsize, npages, size, copied; in iov_kunit_copy_to_kvec() local
113 npages = bufsize / PAGE_SIZE; in iov_kunit_copy_to_kvec()
115 scratch = iov_kunit_create_buffer(test, &spages, npages); in iov_kunit_copy_to_kvec()
[all …]
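iov_kunit_create_buffer() bulk-allocates npages pages and vmap()s them into one contiguous kernel buffer; the caller computes npages as bufsize / PAGE_SIZE, relying on bufsize being a page multiple. There is no userspace vmap(), so this sketch substitutes a single anonymous mmap() to show only the npages sizing; it is not the KUnit helper:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t bufsize = 8 * page_size;		/* illustrative, page multiple */
	size_t npages = bufsize / page_size;

	/* mmap stands in for alloc_pages_bulk_array() + vmap() */
	unsigned char *buffer = mmap(NULL, npages * page_size,
				     PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buffer == MAP_FAILED)
		return 1;
	printf("mapped %zu pages (%zu bytes)\n", npages, npages * page_size);
	munmap(buffer, npages * page_size);
	return 0;
}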
/linux-6.12.1/io_uring/
memmap.c
59 void *io_pages_map(struct page ***out_pages, unsigned short *npages, in io_pages_map() argument
80 *npages = nr_pages; in io_pages_map()
86 *npages = 0; in io_pages_map()
90 void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages, in io_pages_unmap() argument
98 if (put_pages && *npages) { in io_pages_unmap()
107 *npages = 1; in io_pages_unmap()
108 else if (*npages > 1) in io_pages_unmap()
110 for (i = 0; i < *npages; i++) in io_pages_unmap()
117 *npages = 0; in io_pages_unmap()
120 void io_pages_free(struct page ***pages, int npages) in io_pages_free() argument
[all …]
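io_pages_map()/io_pages_unmap() report the page count through an out-parameter and zero it on teardown, so a stale count cannot be unmapped twice. A hedged userspace sketch of that bookkeeping shape (calloc/free stand in for the kernel page allocator; names are illustrative):

#include <stdlib.h>

static void *pages_map(void ***out_pages, unsigned short *npages,
		       unsigned short nr_pages)
{
	void **pages = calloc(nr_pages, sizeof(*pages));

	if (!pages) {
		*npages = 0;
		return NULL;
	}
	*out_pages = pages;
	*npages = nr_pages;		/* published only on success */
	return pages;
}

static void pages_unmap(void ***pages, unsigned short *npages)
{
	if (!*npages)			/* already torn down: no-op */
		return;
	free(*pages);
	*pages = NULL;
	*npages = 0;			/* cleared so a repeat is safe */
}

int main(void)
{
	void **pages;
	unsigned short npages;

	pages_map(&pages, &npages, 8);
	pages_unmap(&pages, &npages);
	pages_unmap(&pages, &npages);	/* safe: count was zeroed */
	return 0;
}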
memmap.h
4 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
5 void io_pages_free(struct page ***pages, int npages);
7 struct page **pages, int npages);
9 void *io_pages_map(struct page ***out_pages, unsigned short *npages,
11 void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
14 void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
52 s32 npages; member
199 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
215 *npages = MLX5_GET(query_pages_out, out, num_pages); in mlx5_cmd_query_pages()
353 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
366 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in give_pages()
374 for (i = 0; i < npages; i++) { in give_pages()
381 dev->priv.fw_pages_alloc_failed += (npages - i); in give_pages()
393 MLX5_SET(manage_pages_in, in, input_num_entries, npages); in give_pages()
408 func_id, npages, err); in give_pages()
413 dev->priv.page_counters[func_type] += npages; in give_pages()
[all …]
/linux-6.12.1/drivers/infiniband/hw/hfi1/
user_pages.c
30 u32 nlocked, u32 npages) in hfi1_can_pin_pages() argument
47 if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages) in hfi1_can_pin_pages()
66 if (nlocked + npages > (ulimit_pages / usr_ctxts / 4)) in hfi1_can_pin_pages()
74 if (nlocked + npages > cache_limit_pages) in hfi1_can_pin_pages()
80 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, in hfi1_acquire_user_pages() argument
86 ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages); in hfi1_acquire_user_pages()
96 size_t npages, bool dirty) in hfi1_release_user_pages() argument
98 unpin_user_pages_dirty_lock(p, npages, dirty); in hfi1_release_user_pages()
101 atomic64_sub(npages, &mm->pinned_vm); in hfi1_release_user_pages()
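hfi1_can_pin_pages() above layers three arithmetic gates before pinning: the process pinned-memory budget, a per-context quarter share, and a cache cap. A sketch of those comparisons with plain integers (the limit values are illustrative stand-ins for the driver's tunables):

#include <stdbool.h>
#include <stdio.h>

static bool can_pin_pages(unsigned long pinned_vm, unsigned long nlocked,
			  unsigned long npages, unsigned long ulimit_pages,
			  unsigned long usr_ctxts,
			  unsigned long cache_limit_pages)
{
	if (pinned_vm + npages > ulimit_pages)		/* RLIMIT_MEMLOCK */
		return false;
	if (nlocked + npages > ulimit_pages / usr_ctxts / 4)
		return false;				/* per-context share */
	if (nlocked + npages > cache_limit_pages)	/* cache cap */
		return false;
	return true;
}

int main(void)
{
	/* 64 Ki-page ulimit, 4 contexts, 2 Ki-page cache: illustrative */
	printf("%d\n", can_pin_pages(1000, 100, 512, 65536, 4, 2048));
	return 0;
}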
pin_system.c
20 unsigned int npages; member
55 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) in sdma_cache_evict() argument
60 evict_data.target = npages; in sdma_cache_evict()
66 unsigned int start, unsigned int npages) in unpin_vector_pages() argument
68 hfi1_release_user_pages(mm, pages + start, npages, false); in unpin_vector_pages()
79 if (node->npages) { in free_system_node()
81 node->npages); in free_system_node()
82 atomic_sub(node->npages, &node->pq->n_locked); in free_system_node()
116 struct sdma_mmu_node *node, int npages) in pin_system_pages() argument
122 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); in pin_system_pages()
[all …]
user_exp_rcv.c
16 static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
20 u16 pageidx, unsigned int npages);
136 unsigned int npages, in unpin_rcv_pages() argument
145 node->npages * PAGE_SIZE, DMA_FROM_DEVICE); in unpin_rcv_pages()
152 hfi1_release_user_pages(mm, pages, npages, mapped); in unpin_rcv_pages()
153 fd->tid_n_pinned -= npages; in unpin_rcv_pages()
162 unsigned int npages = tidbuf->npages; in pin_rcv_pages() local
167 if (npages > fd->uctxt->expected_count) { in pin_rcv_pages()
173 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); in pin_rcv_pages()
182 if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) { in pin_rcv_pages()
[all …]
/linux-6.12.1/arch/sparc/kernel/
iommu.c
158 unsigned long npages) in alloc_npages() argument
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
204 int npages, nid; in dma_4u_alloc_coherent() local
233 npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
235 while (npages--) { in dma_4u_alloc_coherent()
251 unsigned long order, npages; in dma_4u_free_coherent() local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
271 unsigned long flags, npages, oaddr; in dma_4u_map_page() local
283 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
[all …]
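dma_4u_map_page() sizes the mapping from an unaligned CPU address: it rounds oaddr + sz up to an IO page boundary, subtracts the rounded-down start, and (in the elided continuation) shifts the byte span down by IO_PAGE_SHIFT. A sketch of that span arithmetic, assuming sparc64's 8 KiB IO page:

#include <stdio.h>

#define IO_PAGE_SHIFT	13	/* sparc64 uses 8 KiB IOMMU pages */
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1))
#define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

int main(void)
{
	unsigned long oaddr = 0x12345;	/* unaligned buffer address */
	unsigned long sz = 0x5000;	/* mapping length in bytes */

	/* bytes covered, from rounded-down start to rounded-up end */
	unsigned long span = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	unsigned long npages = span >> IO_PAGE_SHIFT;

	printf("npages=%lu\n", npages);	/* 3 pages for this example */
	return 0;
}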
pci_sun4v.c
61 unsigned long npages; /* Number of pages in list. */ member
75 p->npages = 0; in iommu_batch_start()
92 unsigned long npages = p->npages; in iommu_batch_flush() local
101 while (npages != 0) { in iommu_batch_flush()
105 npages, in iommu_batch_flush()
113 npages, prot, __pa(pglist), in iommu_batch_flush()
118 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), in iommu_batch_flush()
136 npages -= num; in iommu_batch_flush()
141 p->npages = 0; in iommu_batch_flush()
150 if (p->entry + p->npages == entry) in iommu_batch_new_entry()
[all …]
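iommu_batch_flush() above hands npages entries to the hypervisor, which may map fewer than requested per call; the loop advances by the returned count until npages reaches zero. A generic sketch of that partial-progress pattern (hv_map_stub is a hypothetical stand-in for the sun4v mapping call):

#include <stdio.h>

/* Hypothetical stand-in: "maps" at most 4 entries per call and
 * returns how many it handled, like the hypervisor call above. */
static unsigned long hv_map_stub(unsigned long entry, unsigned long npages)
{
	return npages > 4 ? 4 : npages;
}

int main(void)
{
	unsigned long entry = 0, npages = 10;

	while (npages != 0) {
		unsigned long num = hv_map_stub(entry, npages);

		if (num == 0)		/* no forward progress: bail out */
			return 1;
		entry += num;		/* resume past what was mapped */
		npages -= num;
	}
	printf("all entries mapped\n");
	return 0;
}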
/linux-6.12.1/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.c
22 err = sg_alloc_table(st, mock->npages, GFP_KERNEL); in mock_map_dma_buf()
27 for (i = 0; i < mock->npages; i++) { in mock_map_dma_buf()
59 for (i = 0; i < mock->npages; i++) in mock_dmabuf_release()
70 vaddr = vm_map_ram(mock->pages, mock->npages, 0); in mock_dmabuf_vmap()
82 vm_unmap_ram(map->vaddr, mock->npages); in mock_dmabuf_vunmap()
99 static struct dma_buf *mock_dmabuf(int npages) in mock_dmabuf() argument
106 mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), in mock_dmabuf()
111 mock->npages = npages; in mock_dmabuf()
112 for (i = 0; i < npages; i++) { in mock_dmabuf()
119 exp_info.size = npages * PAGE_SIZE; in mock_dmabuf()
/linux-6.12.1/drivers/gpu/drm/xe/
xe_hmm.c
34 u64 i, npages; in xe_mark_range_accessed() local
36 npages = xe_npages_in_range(range->start, range->end); in xe_mark_range_accessed()
37 for (i = 0; i < npages; i++) { in xe_mark_range_accessed()
85 u64 i, npages; in xe_build_sg() local
88 npages = xe_npages_in_range(range->start, range->end); in xe_build_sg()
89 pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL); in xe_build_sg()
93 for (i = 0; i < npages; i++) { in xe_build_sg()
98 ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT, in xe_build_sg()
178 u64 npages; in xe_hmm_userptr_populate_range() local
196 npages = xe_npages_in_range(userptr_start, userptr_end); in xe_hmm_userptr_populate_range()
[all …]
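xe_npages_in_range() is applied above to page-aligned [start, end) virtual ranges; with that invariant the count presumably reduces to the byte span divided by the page size. A sketch of that reduction, with the alignment assumption made explicit (not the driver's helper verbatim):

#include <assert.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long npages_in_range(unsigned long start, unsigned long end,
				     unsigned long page_size)
{
	assert(start % page_size == 0 && end % page_size == 0 && end > start);
	return (end - start) / page_size;
}

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);

	printf("%lu\n", npages_in_range(0x10000, 0x14000, page_size));
	return 0;
}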
/linux-6.12.1/drivers/iommu/iommufd/
iova_bitmap.c
48 unsigned long npages; member
169 unsigned long npages; in iova_bitmap_get() local
179 npages = DIV_ROUND_UP((bitmap->mapped_total_index - in iova_bitmap_get()
193 npages = min(npages + !!offset_in_page(addr), in iova_bitmap_get()
196 ret = pin_user_pages_fast((unsigned long)addr, npages, in iova_bitmap_get()
201 mapped->npages = (unsigned long)ret; in iova_bitmap_get()
224 if (mapped->npages) { in iova_bitmap_put()
225 unpin_user_pages(mapped->pages, mapped->npages); in iova_bitmap_put()
226 mapped->npages = 0; in iova_bitmap_put()
307 bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff; in iova_bitmap_mapped_remaining()
[all …]
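iova_bitmap_get() pins enough pages to cover the remaining bitmap bytes, adding one extra page when the user address is not page-aligned (the !!offset_in_page(addr) term); the min() clamp to the pinning-window maximum is omitted in this sketch. DIV_ROUND_UP and offset_in_page mirror the kernel macros:

#include <stdio.h>

#define PAGE_SIZE		4096UL	/* illustrative fixed page size */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1000800;	/* user bitmap, offset 0x800 in page */
	unsigned long bytes = 3 * PAGE_SIZE;

	/* pages holding the bytes, plus one if addr straddles a boundary */
	unsigned long npages = DIV_ROUND_UP(bytes, PAGE_SIZE) +
			       !!offset_in_page(addr);

	printf("npages=%lu\n", npages);	/* 4 for this example */
	return 0;
}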
/linux-6.12.1/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
49 svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, in svm_migrate_gart_map() argument
65 num_bytes = npages * 8; in svm_migrate_gart_map()
93 amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); in svm_migrate_gart_map()
125 uint64_t *vram, uint64_t npages, in svm_migrate_copy_memory_gart() argument
138 while (npages) { in svm_migrate_copy_memory_gart()
139 size = min(GTT_MAX_PAGES, npages); in svm_migrate_copy_memory_gart()
164 npages -= size; in svm_migrate_copy_memory_gart()
165 if (npages) { in svm_migrate_copy_memory_gart()
268 for (i = 0; i < migrate->npages; i++) { in svm_migrate_unsuccessful_pages()
281 uint64_t npages = migrate->cpages; in svm_migrate_copy_to_vram() local
[all …]
/linux-6.12.1/mm/
migrate_device.c
28 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_skip()
29 migrate->src[migrate->npages++] = 0; in migrate_vma_collect_skip()
48 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; in migrate_vma_collect_hole()
49 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
50 migrate->npages++; in migrate_vma_collect_hole()
268 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_pmd()
269 migrate->src[migrate->npages++] = mpfn; in migrate_vma_collect_pmd()
314 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); in migrate_vma_collect()
363 unsigned long npages, in migrate_device_unmap() argument
372 for (i = 0; i < npages; i++) { in migrate_device_unmap()
[all …]
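The collect callbacks above fill parallel src/dst arrays indexed by a running migrate->npages counter: a hole records MIGRATE_PFN_MIGRATE in src, while a skipped entry records zero in both. A sketch of that accounting shape (the flag bit is an illustrative value, not necessarily the kernel's encoding):

#include <stdio.h>

#define MIGRATE_PFN_MIGRATE	(1UL << 1)	/* illustrative flag bit */

struct migrate_vma_sketch {
	unsigned long src[16];
	unsigned long dst[16];
	unsigned long npages;	/* running index into src[]/dst[] */
};

static void collect_skip(struct migrate_vma_sketch *m)
{
	m->dst[m->npages] = 0;
	m->src[m->npages++] = 0;	/* no pfn: entry won't migrate */
}

static void collect_hole(struct migrate_vma_sketch *m)
{
	m->src[m->npages] = MIGRATE_PFN_MIGRATE;	/* migratable hole */
	m->dst[m->npages] = 0;
	m->npages++;
}

int main(void)
{
	struct migrate_vma_sketch m = { .npages = 0 };

	collect_hole(&m);
	collect_skip(&m);
	printf("collected %lu entries\n", m.npages);
	return 0;
}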
/linux-6.12.1/arch/powerpc/kernel/
iommu.c
216 unsigned long npages, in iommu_range_alloc() argument
223 int largealloc = npages > 15; in iommu_range_alloc()
235 if (unlikely(npages == 0)) { in iommu_range_alloc()
289 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
325 end = n + npages; in iommu_range_alloc()
347 void *page, unsigned int npages, in iommu_alloc() argument
356 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
365 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
375 __iommu_free(tbl, ret, npages); in iommu_alloc()
390 unsigned int npages) in iommu_free_check() argument
[all …]
/linux-6.12.1/drivers/fpga/
dfl-afu-dma-region.c
37 int npages = region->length >> PAGE_SHIFT; in afu_dma_pin_pages() local
41 ret = account_locked_vm(current->mm, npages, true); in afu_dma_pin_pages()
45 region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL); in afu_dma_pin_pages()
51 pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE, in afu_dma_pin_pages()
56 } else if (pinned != npages) { in afu_dma_pin_pages()
70 account_locked_vm(current->mm, npages, false); in afu_dma_pin_pages()
85 long npages = region->length >> PAGE_SHIFT; in afu_dma_unpin_pages() local
88 unpin_user_pages(region->pages, npages); in afu_dma_unpin_pages()
90 account_locked_vm(current->mm, npages, false); in afu_dma_unpin_pages()
92 dev_dbg(dev, "%ld pages unpinned\n", npages); in afu_dma_unpin_pages()
[all …]
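afu_dma_pin_pages() above treats a short return from pin_user_pages_fast() as an error: fewer than npages pinned means the already-pinned subset must be released before failing. A userspace-shaped sketch of that cleanup logic (pin_stub is hypothetical and pretends it can only pin 3 pages; malloc/free stand in for pin/unpin):

#include <stdio.h>
#include <stdlib.h>

static int pin_stub(void **pages, int npages)
{
	int pinned = npages < 3 ? npages : 3;	/* fake partial pin */

	for (int i = 0; i < pinned; i++)
		pages[i] = malloc(1);
	return pinned;
}

static int pin_region(int npages)
{
	void **pages = calloc(npages, sizeof(*pages));
	int pinned;

	if (!pages)
		return -1;
	pinned = pin_stub(pages, npages);
	if (pinned != npages) {		/* partial pin: undo and fail */
		for (int i = 0; i < pinned; i++)
			free(pages[i]);
		free(pages);
		return -1;
	}
	/* a real driver keeps the pins; this sketch tears down at once */
	for (int i = 0; i < npages; i++)
		free(pages[i]);
	free(pages);
	return 0;
}

int main(void)
{
	printf("pin 2 pages: %s\n", pin_region(2) == 0 ? "ok" : "failed");
	printf("pin 8 pages: %s\n", pin_region(8) == 0 ? "ok" : "failed");
	return 0;
}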
/linux-6.12.1/drivers/infiniband/hw/mthca/
mthca_memfree.c
69 dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c
195 int npages, shift; in mthca_buf_alloc() local
202 npages = 1; in mthca_buf_alloc()
214 npages *= 2; in mthca_buf_alloc()
217 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
222 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
226 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
229 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
234 buf->page_list = kmalloc_array(npages, in mthca_buf_alloc()
240 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
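mthca_buf_alloc() sizes npages two ways: a direct buffer doubles npages (starting at 1) until one power-of-two allocation covers size, while an indirect buffer needs one page-list entry per page, i.e. ceil(size / PAGE_SIZE). A sketch of both computations (page size fixed at 4 KiB for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative */

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE + 100;	/* example request */

	/* direct: one physically contiguous power-of-two allocation */
	unsigned long npages_direct = 1;
	unsigned long order = 0;	/* log2 of allocation in pages */

	while (npages_direct * PAGE_SIZE < size) {
		npages_direct *= 2;
		order++;
	}

	/* indirect: one page-list entry per page of the buffer */
	unsigned long npages_indirect = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("direct=%lu pages (order %lu), indirect=%lu pages\n",
	       npages_direct, order, npages_indirect);
	return 0;
}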
/linux-6.12.1/arch/x86/mm/
cpu_entry_area.c
108 unsigned int npages; in percpu_setup_debug_store() local
115 npages = sizeof(struct debug_store) / PAGE_SIZE; in percpu_setup_debug_store()
117 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, in percpu_setup_debug_store()
125 npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; in percpu_setup_debug_store()
126 for (; npages; npages--, cea += PAGE_SIZE) in percpu_setup_debug_store()
134 npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
136 estacks->name## _stack, npages, PAGE_KERNEL); \
143 unsigned int npages; in percpu_setup_exception_stacks() local
/linux-6.12.1/drivers/infiniband/core/
umem.c
152 unsigned long npages; in ib_umem_get() local
191 npages = ib_umem_num_pages(umem); in ib_umem_get()
192 if (npages == 0 || npages > UINT_MAX) { in ib_umem_get()
199 new_pinned = atomic64_add_return(npages, &mm->pinned_vm); in ib_umem_get()
201 atomic64_sub(npages, &mm->pinned_vm); in ib_umem_get()
211 while (npages) { in ib_umem_get()
214 min_t(unsigned long, npages, in ib_umem_get()
224 npages -= pinned; in ib_umem_get()
228 npages, GFP_KERNEL); in ib_umem_get()
ib_core_uverbs.c
141 pgoff, entry->npages); in rdma_user_mmap_entry_get_pgoff()
171 if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) { in rdma_user_mmap_entry_get()
191 for (i = 0; i < entry->npages; i++) in rdma_user_mmap_entry_free()
196 entry->start_pgoff, entry->npages); in rdma_user_mmap_entry_free()
269 u32 xa_first, xa_last, npages; in rdma_user_mmap_entry_insert_range() local
290 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE); in rdma_user_mmap_entry_insert_range()
291 entry->npages = npages; in rdma_user_mmap_entry_insert_range()
301 if (check_add_overflow(xa_first, npages, &xa_last)) in rdma_user_mmap_entry_insert_range()
328 entry->start_pgoff, npages); in rdma_user_mmap_entry_insert_range()
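rdma_user_mmap_entry_insert_range() converts the byte length to npages with DIV_ROUND_UP and then guards the page-offset index span with check_add_overflow(). A sketch using the compiler builtin that the kernel helper wraps:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL	/* illustrative */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t length = 10 * PAGE_SIZE + 1;	/* needs 11 pages */
	uint32_t xa_first = 0xfffffff8, xa_last;
	uint32_t npages = (uint32_t)DIV_ROUND_UP(length, PAGE_SIZE);

	/* check_add_overflow() in the kernel wraps this builtin */
	if (__builtin_add_overflow(xa_first, npages, &xa_last)) {
		fprintf(stderr, "pgoff range wraps: rejecting\n");
		return 1;
	}
	printf("entry spans pgoff %u..%u\n", xa_first, xa_last - 1);
	return 0;
}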
/linux-6.12.1/tools/testing/selftests/kvm/
memslot_perf_test.c
88 uint64_t npages; member
196 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, in vm_gpa2hva()
209 slotpages = data->npages - slot * data->pages_per_slot; in vm_gpa2hva()
267 mempages = data->npages; in get_max_slots()
300 data->npages = mempages; in prepare_vm()
301 TEST_ASSERT(data->npages > 1, "Can't test without any memory"); in prepare_vm()
303 data->pages_per_slot = data->npages / data->nslots; in prepare_vm()
304 rempages = data->npages % data->nslots; in prepare_vm()
319 uint64_t npages; in prepare_vm() local
321 npages = data->pages_per_slot; in prepare_vm()
[all …]
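prepare_vm() splits npages across nslots with integer division and, judging by the vm_gpa2hva() hit above, lets the final slot absorb the remainder (rempages). A sketch of that split (the test's exact slot layout may differ in details):

#include <stdio.h>

int main(void)
{
	unsigned long npages = 1000, nslots = 7;
	unsigned long pages_per_slot = npages / nslots;
	unsigned long rempages = npages % nslots;

	for (unsigned long slot = 0; slot < nslots; slot++) {
		unsigned long n = pages_per_slot;

		if (slot == nslots - 1)	/* last slot absorbs the remainder */
			n += rempages;
		printf("slot %lu: %lu pages\n", slot, n);
	}
	return 0;
}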
/linux-6.12.1/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_misc.c
53 u64 npages, bool alloc_pages) in pvrdma_page_dir_init() argument
57 if (npages > PVRDMA_PAGE_DIR_MAX_PAGES) in pvrdma_page_dir_init()
67 pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1; in pvrdma_page_dir_init()
81 pdir->npages = npages; in pvrdma_page_dir_init()
84 pdir->pages = kcalloc(npages, sizeof(*pdir->pages), in pvrdma_page_dir_init()
89 for (i = 0; i < pdir->npages; i++) { in pvrdma_page_dir_init()
127 for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { in pvrdma_page_dir_cleanup_pages()
173 if (idx >= pdir->npages) in pvrdma_page_dir_insert_dma()
189 if (offset >= pdir->npages) in pvrdma_page_dir_insert_umem()
212 if (num_pages > pdir->npages) in pvrdma_page_dir_insert_page_list()
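pvrdma_page_dir_init() derives a two-level layout from npages: the number of second-level tables is the table index of the last page plus one, mirroring PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1 above. A sketch, assuming 512 page pointers per table (a 4 KiB table of 8-byte DMA addresses; the real constant may differ):

#include <stdio.h>

#define PTRS_PER_TABLE 512UL	/* assumed: 4 KiB table / 8-byte entries */

int main(void)
{
	unsigned long npages = 1300;	/* example region size in pages */

	/* table index of the last page, then count = index + 1 */
	unsigned long last_table = (npages - 1) / PTRS_PER_TABLE;
	unsigned long ntables = last_table + 1;

	printf("%lu pages need %lu page tables\n", npages, ntables);
	return 0;
}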
