Lines matching full:pages in mm/gup.c

35 static inline void sanity_check_pinned_pages(struct page **pages,  in sanity_check_pinned_pages()  argument
42 * We only pin anonymous pages if they are exclusive. Once pinned, we in sanity_check_pinned_pages()
46 * We'd like to verify that our pinned anonymous pages are still mapped in sanity_check_pinned_pages()
53 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
54 struct page *page = *pages; in sanity_check_pinned_pages()
181 * Pages that were pinned via pin_user_pages*() must be released via either
183 * that such pages can be separately tracked and uniquely handled. In
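A minimal sketch of the pairing rule quoted above: references taken with the pin_user_pages*() (FOLL_PIN) family must be dropped with unpin_user_page()/unpin_user_pages(), never put_page(). The address and helper name here are hypothetical.

    #include <linux/mm.h>

    /* Hypothetical helper: pin one user page, use it, release it correctly. */
    static int touch_one_user_page(unsigned long uaddr)
    {
            struct page *page;
            int ret;

            ret = pin_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
            if (ret != 1)
                    return ret < 0 ? ret : -EFAULT;

            /* ... access the page ... */

            unpin_user_page(page);  /* not put_page(): this was FOLL_PIN */
            return 0;
    }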
264 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
265 * @pages: array of pages to be maybe marked dirty, and definitely released.
266 * @npages: number of pages in the @pages array.
267 * @make_dirty: whether to mark the pages dirty
272 * For each page in the @pages array, make that page (or its head page, if a
274 * listed as clean. In any case, releases all pages using unpin_user_page(),
285 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
293 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
297 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
299 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
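A sketch of the usual caller-side pattern for unpin_user_pages_dirty_lock(): release after a device has DMA-written into pages pinned earlier by a pin_user_pages*() call. The pages/npages variables are assumed to come from that earlier pin.

    /* Device wrote into the buffer: mark pages dirty, then drop the pins. */
    unpin_user_pages_dirty_lock(pages, npages, true);

    /* Read-only usage would instead skip the dirtying: */
    unpin_user_pages_dirty_lock(pages, npages, false);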
335 * @npages: number of consecutive pages to release.
336 * @make_dirty: whether to mark the pages dirty
338 * "gup-pinned page range" refers to a range of pages that has had one of the
342 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
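A sketch of unpin_user_page_range_dirty_lock(), which does the same job for a physically contiguous run described by its first page, so no page array is needed. first_page/npages are hypothetical and must describe a range previously pinned via a pin_user_pages*() variant.

    /* Release npages gup-pinned pages starting at first_page, dirtying them. */
    unpin_user_page_range_dirty_lock(first_page, npages, true);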
370 static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages) in gup_fast_unpin_user_pages() argument
378 * fork() and some anonymous pages might now actually be shared -- in gup_fast_unpin_user_pages()
382 folio = gup_folio_next(pages, npages, i, &nr); in gup_fast_unpin_user_pages()
388 * unpin_user_pages() - release an array of gup-pinned pages.
389 * @pages: array of pages to be released.
390 * @npages: number of pages in the @pages array.
392 * For each page in the @pages array, release the page using unpin_user_page().
396 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
403 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
410 sanity_check_pinned_pages(pages, npages); in unpin_user_pages()
412 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
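A sketch of unpin_user_pages() as the bulk release on the normal or error path; the allocation and sizes are hypothetical, and the release helper matches the FOLL_PIN acquisition as required by the rule above.

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int with_pinned_buffer(unsigned long uaddr, int nr)
    {
            struct page **pages;
            int got;

            pages = kvmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            got = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
            if (got < 0) {
                    kvfree(pages);
                    return got;
            }

            /* ... use pages[0 .. got) ... */

            unpin_user_pages(pages, got);   /* bulk FOLL_PIN release */
            kvfree(pages);
            return 0;
    }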
419 * unpin_user_folio() - release pages of a folio
421 * @npages: number of pages of same folio
481 struct page **pages) in record_subpages() argument
488 pages[nr] = nth_page(start_page, nr); in record_subpages()
597 * When core dumping, we don't want to allocate unnecessary pages or in no_page_table()
639 * device mapped pages can only be returned if the caller in follow_huge_pud()
858 * We only care about anon pages in can_follow_write_pte() and don't in follow_page_pte()
869 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
880 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
1051 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
1100 /* user gate pages are read-only */ in get_gate_page()
1297 * Anon pages in shared mappings are surprising: now in check_vma_flags()
1362 * __get_user_pages() - pin user pages in memory
1365 * @nr_pages: number of pages from start to pin
1367 * @pages: array that receives pointers to the pages pinned.
1369 * only intends to ensure the pages are faulted in.
1372 * Returns either number of pages pinned (which may be less than the
1376 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1377 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1378 * pages pinned. Again, this may be less than nr_pages.
1381 * The caller is responsible for releasing returned @pages, via put_page().
1417 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1429 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1457 pages ? &page : NULL); in __get_user_pages()
1474 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1503 * struct page. If the caller expects **pages to be in __get_user_pages()
1507 if (pages) { in __get_user_pages()
1520 if (pages) { in __get_user_pages()
1532 * pages. in __get_user_pages()
1555 pages[i + j] = subpage; in __get_user_pages()
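The return convention documented above (a negative errno, or a possibly short positive count) means callers must handle partial success. A sketch using the exported get_user_pages(), which follows the same convention; no FOLL_PIN variant is involved, so the short-count cleanup uses put_page(), and the caller is assumed to hold mmap_read_lock():

    long got = get_user_pages(start, nr_pages, FOLL_WRITE, pages);

    if (got < 0)
            return got;             /* nothing was pinned */
    if (got < nr_pages) {
            while (got--)           /* partial: drop what we did get */
                    put_page(pages[got]);
            return -EFAULT;
    }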
1711 struct page **pages, in __get_user_pages_locked() argument
1739 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1743 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1746 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1751 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1782 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1784 if (likely(pages)) in __get_user_pages_locked()
1785 pages += ret; in __get_user_pages_locked()
1816 pages, locked); in __get_user_pages_locked()
1832 if (likely(pages)) in __get_user_pages_locked()
1833 pages++; in __get_user_pages_locked()
1857 * populate_vma_page_range() - populate a range of pages in the vma.
1863 * This takes care of mlocking the pages too if VM_LOCKED is set.
1865 * Return either number of pages pinned in the vma, or a negative error
1898 /* ... similarly, we've never faulted in PROT_NONE pages */ in populate_vma_page_range()
1933 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1941 * Returns either number of processed pages in the MM, or a negative error
1982 * __mm_populate - populate and/or mlock pages within a range of address space.
2000 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
2022 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
2023 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
2043 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
2083 if (pages) { in __get_user_pages_locked()
2084 pages[i] = virt_to_page((void *)start); in __get_user_pages_locked()
2085 if (pages[i]) in __get_user_pages_locked()
2086 get_page(pages[i]); in __get_user_pages_locked()
2172 * already know that some or all of the pages in the address range aren't in
2177 * Note that we don't pin or otherwise hold the pages referenced that we fault
2278 * An array of either pages or folios ("pofs"). Although it may seem tempting to
2280 * pages, that approach won't work in the longer term, because eventually the
2286 struct page **pages; member
2298 return page_folio(pofs->pages[i]); in pofs_get_folio()
2311 unpin_user_pages(pofs->pages, pofs->nr_entries); in pofs_unpin()
2484 struct page **pages) in check_and_migrate_movable_pages() argument
2487 .pages = pages, in check_and_migrate_movable_pages()
2496 struct page **pages) in check_and_migrate_movable_pages() argument
2515 struct page **pages, in __gup_longterm_locked() argument
2523 return __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
2529 pages, locked, in __gup_longterm_locked()
2537 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); in __gup_longterm_locked()
2547 static bool is_valid_gup_args(struct page **pages, int *locked, in is_valid_gup_args() argument
2580 /* Pages input must be given if using GET/PIN */ in is_valid_gup_args()
2581 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) in is_valid_gup_args()
2595 * get_user_pages_remote() - pin user pages in memory
2598 * @nr_pages: number of pages from start to pin
2600 * @pages: array that receives pointers to the pages pinned.
2602 * only intends to ensure the pages are faulted in.
2607 * Returns either number of pages pinned (which may be less than the
2611 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2612 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2613 * pages pinned. Again, this may be less than nr_pages.
2615 * The caller is responsible for releasing returned @pages, via put_page().
2639 * via the user virtual addresses. The pages may be submitted for
2652 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2657 if (!is_valid_gup_args(pages, locked, &gup_flags, in get_user_pages_remote()
2661 return __get_user_pages_locked(mm, start, nr_pages, pages, in get_user_pages_remote()
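A sketch of the remote-access pattern get_user_pages_remote() supports: the caller holds a reference on the target mm, takes mmap_read_lock(), and passes locked = 1 so GUP may drop the lock while faulting. Function and variable names are hypothetical; with pages[] filled in and no FOLL_PIN, the reference is dropped with put_page().

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <linux/sched/mm.h>
    #include <linux/string.h>

    /* Hypothetical: copy one page out of another process's address space. */
    static int read_remote_page(struct mm_struct *mm, unsigned long uaddr,
                                void *buf)
    {
            struct page *page;
            int locked = 1;
            void *kaddr;
            long got;

            if (!mmget_not_zero(mm))
                    return -ESRCH;

            mmap_read_lock(mm);
            got = get_user_pages_remote(mm, uaddr & PAGE_MASK, 1, 0,
                                        &page, &locked);
            if (locked)
                    mmap_read_unlock(mm);   /* GUP may have dropped it */
            mmput(mm);

            if (got != 1)
                    return got < 0 ? got : -EFAULT;

            kaddr = kmap_local_page(page);
            memcpy(buf, kaddr, PAGE_SIZE);
            kunmap_local(kaddr);
            put_page(page);
            return 0;
    }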
2670 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2678 * get_user_pages() - pin user pages in memory
2680 * @nr_pages: number of pages from start to pin
2682 * @pages: array that receives pointers to the pages pinned.
2684 * only intends to ensure the pages are faulted in.
2692 unsigned int gup_flags, struct page **pages) in get_user_pages() argument
2696 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) in get_user_pages()
2699 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages()
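A sketch of the calling convention for get_user_pages(): it operates on current->mm and, in this form, is entered with mmap_lock held for read (compare the unlocked variant documented just below). Names are hypothetical.

    mmap_read_lock(current->mm);
    got = get_user_pages(start, nr_pages, FOLL_WRITE, pages);
    mmap_read_unlock(current->mm);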
2708 * get_user_pages(mm, ..., pages, NULL);
2713 * get_user_pages_unlocked(mm, ..., pages);
2720 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2724 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_unlocked()
2728 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages_unlocked()
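A concrete instance of the replacement shown above, for a hypothetical single-page case; get_user_pages_unlocked() takes and releases mmap_lock internally:

    /* Before: caller wraps the lock around get_user_pages(). */
    mmap_read_lock(current->mm);
    got = get_user_pages(uaddr, 1, FOLL_WRITE, &page);
    mmap_read_unlock(current->mm);

    /* After: one call, no explicit locking. */
    got = get_user_pages_unlocked(uaddr, 1, &page, FOLL_WRITE);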
2736 * get_user_pages_fast attempts to pin user pages by walking the page
2738 * protected from page table pages being freed from under it, and should
2743 * pages are freed. This is unsuitable for architectures that do not need
2746 * Another way to achieve this is to batch up page table containing pages
2748 * pages. Disabling interrupts will allow the gup_fast() walker to both block
2756 * free pages containing page tables or TLB flushing requires IPI broadcast.
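The protocol described above reduces to a short interrupts-off window around the walk. A condensed restatement of the gup_fast() body listed further down (lines 3343-3383): with IRQs disabled on this CPU, both the TLB-flush IPI and the RCU table-free callback are blocked, so page-table pages cannot be freed under the walker.

    local_irq_save(flags);
    gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
    local_irq_restore(flags);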
2857 unsigned int flags, struct page **pages) in gup_fast_undo_dev_pagemap() argument
2860 struct folio *folio = page_folio(pages[--(*nr)]); in gup_fast_undo_dev_pagemap()
2888 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pte_range() argument
2904 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: in gup_fast_pte_range()
2905 * pte_access_permitted() better should reject these pages in gup_fast_pte_range()
2922 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_pte_range()
2965 pages[*nr] = page; in gup_fast_pte_range()
2985 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2989 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pte_range() argument
2998 unsigned long end, unsigned int flags, struct page **pages, int *nr) in gup_fast_devmap_leaf() argument
3009 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3014 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3020 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3024 pages[*nr] = page; in gup_fast_devmap_leaf()
3034 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pmd_leaf() argument
3041 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) in gup_fast_devmap_pmd_leaf()
3045 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_pmd_leaf()
3052 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pud_leaf() argument
3059 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) in gup_fast_devmap_pud_leaf()
3063 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_pud_leaf()
3070 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pmd_leaf() argument
3078 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pud_leaf() argument
3087 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pmd_leaf() argument
3104 pages, nr); in gup_fast_pmd_leaf()
3108 refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); in gup_fast_pmd_leaf()
3134 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pud_leaf() argument
3151 pages, nr); in gup_fast_pud_leaf()
3155 refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); in gup_fast_pud_leaf()
3182 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pgd_leaf() argument
3195 refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); in gup_fast_pgd_leaf()
3222 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pmd_range() argument
3242 pages, nr)) in gup_fast_pmd_range()
3246 pages, nr)) in gup_fast_pmd_range()
3254 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pud_range() argument
3269 pages, nr)) in gup_fast_pud_range()
3272 pages, nr)) in gup_fast_pud_range()
3280 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_p4d_range() argument
3295 pages, nr)) in gup_fast_p4d_range()
3303 unsigned int flags, struct page **pages, int *nr) in gup_fast_pgd_range() argument
3317 pages, nr)) in gup_fast_pgd_range()
3320 pages, nr)) in gup_fast_pgd_range()
3326 unsigned int flags, struct page **pages, int *nr) in gup_fast_pgd_range() argument
3343 unsigned int gup_flags, struct page **pages) in gup_fast() argument
3363 * With interrupts disabled, we block page table pages from being freed in gup_fast()
3371 gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); in gup_fast()
3375 * When pinning pages for DMA there could be a concurrent write protect in gup_fast()
3380 gup_fast_unpin_user_pages(pages, nr_pinned); in gup_fast()
3383 sanity_check_pinned_pages(pages, nr_pinned); in gup_fast()
3390 unsigned int gup_flags, struct page **pages) in gup_fast_fallback() argument
3418 nr_pinned = gup_fast(start, end, gup_flags, pages); in gup_fast_fallback()
3422 /* Slow path: try to get the remaining pages with get_user_pages */ in gup_fast_fallback()
3424 pages += nr_pinned; in gup_fast_fallback()
3426 pages, &locked, in gup_fast_fallback()
3430 * The caller has to unpin the pages we already pinned so in gup_fast_fallback()
3441 * get_user_pages_fast_only() - pin user pages in memory
3443 * @nr_pages: number of pages from start to pin
3445 * @pages: array that receives pointers to the pages pinned.
3452 * pages pinned.
3459 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
3468 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_fast_only()
3472 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in get_user_pages_fast_only()
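A sketch of the opportunistic pattern get_user_pages_fast_only() enables: attempt the no-fault fast walk first, and fall back to a faulting variant only if that fails. Names are hypothetical.

    /* Fast, no-fault attempt (for contexts where faulting is undesirable)... */
    if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page) != 1) {
            /* ...else take the slower, faulting path. */
            if (get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page) != 1)
                    return -EFAULT;
    }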
3477 * get_user_pages_fast() - pin user pages in memory
3479 * @nr_pages: number of pages from start to pin
3481 * @pages: array that receives pointers to the pages pinned.
3484 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3488 * Returns number of pages pinned. This may be fewer than the number requested.
3489 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3493 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
3501 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) in get_user_pages_fast()
3503 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
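A minimal use of get_user_pages_fast() honoring the return convention above; this is the FOLL_GET (not FOLL_PIN) interface, so release is via put_page(). nr is assumed positive.

    int got = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

    if (got < 0)
            return got;
    /* ... use pages[0 .. got), which may be fewer than nr ... */
    while (got--)
            put_page(pages[got]);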
3508 * pin_user_pages_fast() - pin user pages in memory without taking locks
3511 * @nr_pages: number of pages from start to pin
3513 * @pages: array that receives pointers to the pages pinned.
3520 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3523 * Note that if a zero_page is amongst the returned pages, it will not have
3527 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
3529 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages_fast()
3531 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
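The canonical DMA pattern pin_user_pages_fast() is meant for, as a sketch: pin, let the device write, then dirty and unpin in one call. Variables are hypothetical.

    int got = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

    if (got < 0)
            return got;
    /* ... set up and await device DMA into pages[0 .. got) ... */
    unpin_user_pages_dirty_lock(pages, got, true);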
3536 * pin_user_pages_remote() - pin pages of a remote process
3540 * @nr_pages: number of pages from start to pin
3542 * @pages: array that receives pointers to the pages pinned.
3552 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3555 * Note that if a zero_page is amongst the returned pages, it will not have
3560 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
3565 if (!is_valid_gup_args(pages, locked, &gup_flags, in pin_user_pages_remote()
3568 return __gup_longterm_locked(mm, start, nr_pages, pages, in pin_user_pages_remote()
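A sketch of pin_user_pages_remote() for long-lived pins on another mm (roughly the vfio-style use, under the assumption that this kernel allows FOLL_LONGTERM with a locked pointer). The caller holds mmap_read_lock() and passes locked = 1.

    int locked = 1;
    long got;

    mmap_read_lock(mm);
    got = pin_user_pages_remote(mm, uaddr, nr,
                                FOLL_WRITE | FOLL_LONGTERM,
                                pages, &locked);
    if (locked)
            mmap_read_unlock(mm);   /* GUP may have dropped the lock */

    /* ... later, when the mapping is torn down ... */
    if (got > 0)
            unpin_user_pages(pages, got);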
3575 * pin_user_pages() - pin user pages in memory for use by other devices
3578 * @nr_pages: number of pages from start to pin
3580 * @pages: array that receives pointers to the pages pinned.
3586 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3589 * Note that if a zero_page is amongst the returned pages, it will not have
3593 unsigned int gup_flags, struct page **pages) in pin_user_pages() argument
3597 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages()
3600 pages, &locked, gup_flags); in pin_user_pages()
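A sketch of pin_user_pages(): unlike the *_remote() form above, it works on current->mm and (in this code, where it starts with locked = 0) takes mmap_lock internally, so the caller holds no locks. FOLL_LONGTERM marks the pins as potentially indefinite, as in RDMA-style usage.

    long got = pin_user_pages(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM, pages);

    if (got < 0)
            return got;
    /* ... map pages[0 .. got) for long-lived device access ... */
    unpin_user_pages(pages, got);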
3609 * Note that if a zero_page is amongst the returned pages, it will not have
3613 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
3617 if (!is_valid_gup_args(pages, NULL, &gup_flags, in pin_user_pages_unlocked()
3621 return __gup_longterm_locked(current->mm, start, nr_pages, pages, in pin_user_pages_unlocked()