Lines Matching full:pages in drivers/iommu/iommufd/pages.c
69 * allocation can hold about 26M of 4k pages and 13G of 2M pages in an
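The capacity claim in this comment can be sanity-checked with a little arithmetic. Below is a minimal userspace sketch, assuming each batch entry costs 12 bytes (an 8-byte PFN plus a 4-byte contiguous-run count, matching the struct pfn_batch layout) and that every entry covers at least one page; the comment's "26M"/"13G" figures land in the same ballpark.

    #include <stdio.h>

    /* Sanity-check the batch capacity arithmetic; the 12-byte entry size
     * is an assumption based on struct pfn_batch (8-byte pfn + 4-byte
     * run count), not something stated in this listing. */
    int main(void)
    {
            const unsigned long alloc = 64 * 1024;       /* one 64k batch */
            const unsigned long entry = 8 + 4;           /* pfn + npfns */
            const unsigned long nents = alloc / entry;   /* ~5461 entries */

            printf("4k pages: ~%lu MB\n", nents * 4096 / (1024 * 1024));
            printf("2M pages: ~%llu GB\n",
                   (unsigned long long)nents * (2ULL << 20) / (1ULL << 30));
            return 0;
    }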
163 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_add_npinned() argument
167 rc = check_add_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_add_npinned()
169 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_add_npinned()
172 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_sub_npinned() argument
176 rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_sub_npinned()
178 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_sub_npinned()
181 static void iopt_pages_err_unpin(struct iopt_pages *pages, in iopt_pages_err_unpin() argument
189 iopt_pages_sub_npinned(pages, npages); in iopt_pages_err_unpin()
195 * covers a portion of the first and last pages in the range.
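The sub-page case this comment describes is plain index arithmetic: an unaligned iova still maps whole pages, so the first and last indexes are only partially used. A small sketch with made-up values (not the kernel's iopt_area accessors):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Model of an area whose iova is not page aligned: indexes count
     * PAGE_SIZE units, so the range only partially covers its first
     * and last pages. Values are invented for illustration. */
    int main(void)
    {
            unsigned long iova = 0x10600, length = 0x2300;
            unsigned long last_iova = iova + length - 1;

            printf("start index %lu (page used from offset 0x%lx)\n",
                   iova / PAGE_SIZE, iova % PAGE_SIZE);
            printf("last index %lu (page used up to offset 0x%lx)\n",
                   last_iova / PAGE_SIZE, last_iova % PAGE_SIZE);
            return 0;
    }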
249 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages, in iopt_pages_find_domain_area() argument
254 node = interval_tree_iter_first(&pages->domains_itree, index, index); in iopt_pages_find_domain_area()
576 unsigned long last_index, struct page **pages) in pages_to_xarray() argument
578 struct page **end_pages = pages + (last_index - start_index) + 1; in pages_to_xarray()
579 struct page **half_pages = pages + (end_pages - pages) / 2; in pages_to_xarray()
586 while (pages != end_pages) { in pages_to_xarray()
588 if (pages == half_pages && iommufd_should_fail()) { in pages_to_xarray()
596 old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages))); in pages_to_xarray()
600 pages++; in pages_to_xarray()
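The half_pages/iommufd_should_fail() pair visible above is deterministic fault injection: the store loop aborts exactly at the midpoint so the selftests exercise the rollback path. A standalone sketch of the pattern, with stand-in names rather than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_fail = true;  /* stands in for iommufd_should_fail() */

    /* Copy with a deliberate midpoint failure so callers must exercise
     * their unwind path; purely an illustration of the pattern. */
    static int copy_all(int *dst, const int *src, int n)
    {
            const int *half = src + n / 2;

            for (int i = 0; i < n; i++) {
                    if (src + i == half && should_fail)
                            return -1;      /* caller undoes entries [0, i) */
                    dst[i] = src[i];
            }
            return 0;
    }

    int main(void)
    {
            int src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };

            printf("copy: %d (stopped at the midpoint)\n",
                   copy_all(dst, src, 4));
            return 0;
    }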
615 static void batch_from_pages(struct pfn_batch *batch, struct page **pages, in batch_from_pages() argument
618 struct page **end = pages + npages; in batch_from_pages()
620 for (; pages != end; pages++) in batch_from_pages()
621 if (!batch_add_pfn(batch, page_to_pfn(*pages))) in batch_from_pages()
625 static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, in batch_unpin() argument
643 to_unpin, pages->writable); in batch_unpin()
644 iopt_pages_sub_npinned(pages, to_unpin); in batch_unpin()
709 struct iopt_pages *pages) in pfn_reader_user_init() argument
717 if (pages->writable) in pfn_reader_user_init()
722 struct iopt_pages *pages) in pfn_reader_user_destroy() argument
726 mmap_read_unlock(pages->source_mm); in pfn_reader_user_destroy()
727 if (pages->source_mm != current->mm) in pfn_reader_user_destroy()
728 mmput(pages->source_mm); in pfn_reader_user_destroy()
737 struct iopt_pages *pages, in pfn_reader_user_pin() argument
741 bool remote_mm = pages->source_mm != current->mm; in pfn_reader_user_pin()
762 * providing the pages, so we can optimize into in pfn_reader_user_pin()
766 if (!mmget_not_zero(pages->source_mm)) in pfn_reader_user_pin()
779 uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); in pfn_reader_user_pin()
785 mmap_read_lock(pages->source_mm); in pfn_reader_user_pin()
788 rc = pin_user_pages_remote(pages->source_mm, uptr, npages, in pfn_reader_user_pin()
797 iopt_pages_add_npinned(pages, rc); in pfn_reader_user_pin()
804 static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) in incr_user_locked_vm() argument
810 lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >> in incr_user_locked_vm()
813 cur_pages = atomic_long_read(&pages->source_user->locked_vm); in incr_user_locked_vm()
818 } while (!atomic_long_try_cmpxchg(&pages->source_user->locked_vm, in incr_user_locked_vm()
823 static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) in decr_user_locked_vm() argument
825 if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages)) in decr_user_locked_vm()
827 atomic_long_sub(npages, &pages->source_user->locked_vm); in decr_user_locked_vm()
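incr_user_locked_vm() above is a lock-free accounting loop: read the current count, refuse if the rlimit would be exceeded, and publish the new total with compare-and-swap, retrying if another thread raced. A userspace sketch of the same pattern (the limit value and helper name are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long locked_vm;

    /* Charge npages against a limit without a lock: retry the CAS until
     * we either publish the new total or see the limit exceeded. */
    static int charge(long npages, long limit)
    {
            long cur = atomic_load(&locked_vm);

            do {
                    if (cur + npages > limit)
                            return -1;      /* -ENOMEM in the kernel */
            } while (!atomic_compare_exchange_weak(&locked_vm, &cur,
                                                   cur + npages));
            return 0;
    }

    int main(void)
    {
            printf("charge 4 of 8: %d\n", charge(4, 8));
            printf("charge 8 of 8: %d\n", charge(8, 8));  /* over limit */
            return 0;
    }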
831 static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, in update_mm_locked_vm() argument
838 mmap_read_unlock(pages->source_mm); in update_mm_locked_vm()
842 pages->source_mm != current->mm) { in update_mm_locked_vm()
843 if (!mmget_not_zero(pages->source_mm)) in update_mm_locked_vm()
848 mmap_write_lock(pages->source_mm); in update_mm_locked_vm()
849 rc = __account_locked_vm(pages->source_mm, npages, inc, in update_mm_locked_vm()
850 pages->source_task, false); in update_mm_locked_vm()
851 mmap_write_unlock(pages->source_mm); in update_mm_locked_vm()
854 mmput(pages->source_mm); in update_mm_locked_vm()
858 static int do_update_pinned(struct iopt_pages *pages, unsigned long npages, in do_update_pinned() argument
863 switch (pages->account_mode) { in do_update_pinned()
868 rc = incr_user_locked_vm(pages, npages); in do_update_pinned()
870 decr_user_locked_vm(pages, npages); in do_update_pinned()
873 rc = update_mm_locked_vm(pages, npages, inc, user); in do_update_pinned()
879 pages->last_npinned = pages->npinned; in do_update_pinned()
881 atomic64_add(npages, &pages->source_mm->pinned_vm); in do_update_pinned()
883 atomic64_sub(npages, &pages->source_mm->pinned_vm); in do_update_pinned()
887 static void update_unpinned(struct iopt_pages *pages) in update_unpinned() argument
889 if (WARN_ON(pages->npinned > pages->last_npinned)) in update_unpinned()
891 if (pages->npinned == pages->last_npinned) in update_unpinned()
893 do_update_pinned(pages, pages->last_npinned - pages->npinned, false, in update_unpinned()
898 * Changes in the number of pages pinned are done after the pages have been read in pfn_reader_user_update_pinned()
901 * how many pages we have already pinned within a range to generate an accurate
905 struct iopt_pages *pages) in pfn_reader_user_update_pinned() argument
910 lockdep_assert_held(&pages->mutex); in pfn_reader_user_update_pinned()
912 if (pages->npinned == pages->last_npinned) in pfn_reader_user_update_pinned()
915 if (pages->npinned < pages->last_npinned) { in pfn_reader_user_update_pinned()
916 npages = pages->last_npinned - pages->npinned; in pfn_reader_user_update_pinned()
921 npages = pages->npinned - pages->last_npinned; in pfn_reader_user_update_pinned()
924 return do_update_pinned(pages, npages, inc, user); in pfn_reader_user_update_pinned()
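The comment at source lines 898-901 describes deferred accounting: pin first, then reconcile npinned against last_npinned in a single update, because predicting the pin count up front is expensive. A toy model of that reconcile step; the two field names mirror the struct, everything else is invented:

    #include <stdio.h>

    struct toy_pages {
            unsigned long npinned;          /* moves as pins happen */
            unsigned long last_npinned;     /* last value accounted for */
            unsigned long limit;
    };

    /* Account for whatever pinning happened since the last call; on
     * failure the caller unwinds the new pins. Invented helper. */
    static int update_pinned(struct toy_pages *p)
    {
            if (p->npinned > p->last_npinned && p->npinned > p->limit)
                    return -1;              /* charge refused */
            p->last_npinned = p->npinned;   /* accounting accurate again */
            return 0;
    }

    int main(void)
    {
            struct toy_pages p = { .limit = 8 };

            p.npinned += 10;                /* pinned optimistically */
            if (update_pinned(&p)) {
                    p.npinned = p.last_npinned;   /* error unwind: unpin */
                    printf("over limit, unwound to %lu pins\n", p.npinned);
            }
            return 0;
    }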
932 * - The original PFN source, ie pages->source_mm
938 struct iopt_pages *pages; member
950 return pfn_reader_user_update_pinned(&pfns->user, pfns->pages); in pfn_reader_update_pinned()
954 * The batch can contain a mixture of pages that are still in use and pages that
955 * need to be unpinned. Unpin only pages that are not held anywhere else.
962 struct iopt_pages *pages = pfns->pages; in pfn_reader_unpin() local
964 lockdep_assert_held(&pages->mutex); in pfn_reader_unpin()
966 interval_tree_for_each_double_span(&span, &pages->access_itree, in pfn_reader_unpin()
967 &pages->domains_itree, start, last) { in pfn_reader_unpin()
971 batch_unpin(&pfns->batch, pages, span.start_hole - start, in pfn_reader_unpin()
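The rule in the comment at source lines 954-955: an index may be unpinned only when neither the access tree nor the domains tree still covers it, which is exactly what the double-span iterator's "holes" are. A toy model with coverage arrays standing in for the interval trees:

    #include <stdbool.h>
    #include <stdio.h>

    #define N 8

    int main(void)
    {
            /* Stand-ins for the access and domains interval trees. */
            bool access[N]  = { [2] = true, [3] = true };
            bool domains[N] = { [3] = true, [4] = true };

            for (int i = 0; i < N; i++)
                    if (!access[i] && !domains[i])
                            printf("unpin index %d\n", i);  /* hole in both */
            return 0;
    }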
989 batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns, in pfn_reader_fill_span()
996 * Pull as many pages from the first domain we find in the in pfn_reader_fill_span()
1000 area = iopt_pages_find_domain_area(pfns->pages, start_index); in pfn_reader_fill_span()
1004 /* The storage_domain cannot change without the pages mutex */ in pfn_reader_fill_span()
1012 rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index, in pfn_reader_fill_span()
1063 static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, in pfn_reader_init() argument
1068 lockdep_assert_held(&pages->mutex); in pfn_reader_init()
1070 pfns->pages = pages; in pfn_reader_init()
1074 pfn_reader_user_init(&pfns->user, pages); in pfn_reader_init()
1078 interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, in pfn_reader_init()
1079 &pages->domains_itree, start_index, in pfn_reader_init()
1085 * There are many assertions regarding the state of pages->npinned vs
1086 * pages->last_npinned, for instance something like unmapping a domain must only in pfn_reader_release_pins()
1094 struct iopt_pages *pages = pfns->pages; in pfn_reader_release_pins() local
1099 /* Any pages not transferred to the batch are just unpinned */ in pfn_reader_release_pins()
1103 iopt_pages_sub_npinned(pages, npages); in pfn_reader_release_pins()
1114 struct iopt_pages *pages = pfns->pages; in pfn_reader_destroy() local
1117 pfn_reader_user_destroy(&pfns->user, pfns->pages); in pfn_reader_destroy()
1119 WARN_ON(pages->last_npinned != pages->npinned); in pfn_reader_destroy()
1122 static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, in pfn_reader_first() argument
1131 rc = pfn_reader_init(pfns, pages, start_index, last_index); in pfn_reader_first()
1145 struct iopt_pages *pages; in iopt_alloc_pages() local
1158 pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); in iopt_alloc_pages()
1159 if (!pages) in iopt_alloc_pages()
1162 kref_init(&pages->kref); in iopt_alloc_pages()
1163 xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT); in iopt_alloc_pages()
1164 mutex_init(&pages->mutex); in iopt_alloc_pages()
1165 pages->source_mm = current->mm; in iopt_alloc_pages()
1166 mmgrab(pages->source_mm); in iopt_alloc_pages()
1167 pages->uptr = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); in iopt_alloc_pages()
1168 pages->npages = DIV_ROUND_UP(length + (uptr - pages->uptr), PAGE_SIZE); in iopt_alloc_pages()
1169 pages->access_itree = RB_ROOT_CACHED; in iopt_alloc_pages()
1170 pages->domains_itree = RB_ROOT_CACHED; in iopt_alloc_pages()
1171 pages->writable = writable; in iopt_alloc_pages()
1173 pages->account_mode = IOPT_PAGES_ACCOUNT_NONE; in iopt_alloc_pages()
1175 pages->account_mode = IOPT_PAGES_ACCOUNT_USER; in iopt_alloc_pages()
1176 pages->source_task = current->group_leader; in iopt_alloc_pages()
1178 pages->source_user = get_uid(current_user()); in iopt_alloc_pages()
1179 return pages; in iopt_alloc_pages()
1184 struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref); in iopt_release_pages() local
1186 WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root)); in iopt_release_pages()
1187 WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root)); in iopt_release_pages()
1188 WARN_ON(pages->npinned); in iopt_release_pages()
1189 WARN_ON(!xa_empty(&pages->pinned_pfns)); in iopt_release_pages()
1190 mmdrop(pages->source_mm); in iopt_release_pages()
1191 mutex_destroy(&pages->mutex); in iopt_release_pages()
1192 put_task_struct(pages->source_task); in iopt_release_pages()
1193 free_uid(pages->source_user); in iopt_release_pages()
1194 kfree(pages); in iopt_release_pages()
1199 struct iopt_pages *pages, struct iommu_domain *domain, in iopt_area_unpin_domain() argument
1229 * contiguous pages. Thus, if we have to stop unpinning in the in iopt_area_unpin_domain()
1249 batch_unpin(batch, pages, 0, in iopt_area_unpin_domain()
1259 struct iopt_pages *pages, in __iopt_area_unfill_domain() argument
1269 lockdep_assert_held(&pages->mutex); in __iopt_area_unfill_domain()
1273 * so this must unmap any IOVA before we go ahead and unpin the pages. in __iopt_area_unfill_domain()
1274 * This creates a complexity where we need to skip over unpinning pages in __iopt_area_unfill_domain()
1286 interval_tree_for_each_double_span(&span, &pages->domains_itree, in __iopt_area_unfill_domain()
1287 &pages->access_itree, start_index, in __iopt_area_unfill_domain()
1294 iopt_area_unpin_domain(&batch, area, pages, domain, in __iopt_area_unfill_domain()
1307 update_unpinned(pages); in __iopt_area_unfill_domain()
1311 struct iopt_pages *pages, in iopt_area_unfill_partial_domain() argument
1316 __iopt_area_unfill_domain(area, pages, domain, end_index - 1); in iopt_area_unfill_partial_domain()
1336 * @pages: page supplier for the area (area->pages is NULL)
1343 void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, in iopt_area_unfill_domain() argument
1346 __iopt_area_unfill_domain(area, pages, domain, in iopt_area_unfill_domain()
1364 lockdep_assert_held(&area->pages->mutex); in iopt_area_fill_domain()
1366 rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area), in iopt_area_fill_domain()
1391 iopt_area_unfill_partial_domain(area, area->pages, domain, in iopt_area_fill_domain()
1401 * @pages: The pages associated with the area (area->pages is NULL)
1409 int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages) in iopt_area_fill_domains() argument
1424 mutex_lock(&pages->mutex); in iopt_area_fill_domains()
1425 rc = pfn_reader_first(&pfns, pages, iopt_area_index(area), in iopt_area_fill_domains()
1450 interval_tree_insert(&area->pages_node, &pages->domains_itree); in iopt_area_fill_domains()
1474 iopt_area_unfill_partial_domain(area, pages, domain, in iopt_area_fill_domains()
1481 mutex_unlock(&pages->mutex); in iopt_area_fill_domains()
1488 * @pages: The pages associated with the area (area->pages is NULL)
1493 void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) in iopt_area_unfill_domains() argument
1501 mutex_lock(&pages->mutex); in iopt_area_unfill_domains()
1513 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_unfill_domains()
1514 iopt_area_unfill_domain(area, pages, area->storage_domain); in iopt_area_unfill_domains()
1517 mutex_unlock(&pages->mutex); in iopt_area_unfill_domains()
1521 struct iopt_pages *pages, in iopt_pages_unpin_xarray() argument
1526 batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index, in iopt_pages_unpin_xarray()
1528 batch_unpin(batch, pages, 0, batch->total_pfns); in iopt_pages_unpin_xarray()
1536 * @pages: The pages to act on
1540 * Called when an iopt_pages_access is removed, removes pages from the itree.
1543 void iopt_pages_unfill_xarray(struct iopt_pages *pages, in iopt_pages_unfill_xarray() argument
1552 lockdep_assert_held(&pages->mutex); in iopt_pages_unfill_xarray()
1554 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_unfill_xarray()
1555 &pages->domains_itree, start_index, in iopt_pages_unfill_xarray()
1564 iopt_pages_unpin_xarray(&batch, pages, span.start_hole, in iopt_pages_unfill_xarray()
1568 clear_xarray(&pages->pinned_pfns, span.start_used, in iopt_pages_unfill_xarray()
1575 update_unpinned(pages); in iopt_pages_unfill_xarray()
1580 * @pages: The pages to act on
1583 * @out_pages: The output array to return the pages
1587 * the pages directly from the xarray.
1589 * This is part of the SW iommu interface to read pages for in-kernel use.
1591 void iopt_pages_fill_from_xarray(struct iopt_pages *pages, in iopt_pages_fill_from_xarray() argument
1596 XA_STATE(xas, &pages->pinned_pfns, start_index); in iopt_pages_fill_from_xarray()
1611 static int iopt_pages_fill_from_domain(struct iopt_pages *pages, in iopt_pages_fill_from_domain() argument
1620 area = iopt_pages_find_domain_area(pages, start_index); in iopt_pages_fill_from_domain()
1633 static int iopt_pages_fill_from_mm(struct iopt_pages *pages, in iopt_pages_fill_from_mm() argument
1644 rc = pfn_reader_user_pin(user, pages, cur_index, last_index); in iopt_pages_fill_from_mm()
1653 iopt_pages_err_unpin(pages, start_index, cur_index - 1, in iopt_pages_fill_from_mm()
1660 * @pages: The pages to act on
1663 * @out_pages: The output array to return the pages, may be NULL
1665 * This populates the xarray and returns the pages in out_pages. As the slow
1666 * path this is able to copy pages from other storage tiers into the xarray.
1670 * This is part of the SW iommu interface to read pages for in-kernel use.
1672 int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, in iopt_pages_fill_xarray() argument
1680 lockdep_assert_held(&pages->mutex); in iopt_pages_fill_xarray()
1682 pfn_reader_user_init(&user, pages); in iopt_pages_fill_xarray()
1684 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_fill_xarray()
1685 &pages->domains_itree, start_index, in iopt_pages_fill_xarray()
1691 iopt_pages_fill_from_xarray(pages, span.start_used, in iopt_pages_fill_xarray()
1698 iopt_pages_fill_from_domain(pages, span.start_used, in iopt_pages_fill_xarray()
1700 rc = pages_to_xarray(&pages->pinned_pfns, in iopt_pages_fill_xarray()
1711 rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole, in iopt_pages_fill_xarray()
1715 rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, in iopt_pages_fill_xarray()
1718 iopt_pages_err_unpin(pages, span.start_hole, in iopt_pages_fill_xarray()
1724 rc = pfn_reader_user_update_pinned(&user, pages); in iopt_pages_fill_xarray()
1728 pfn_reader_user_destroy(&user, pages); in iopt_pages_fill_xarray()
1733 iopt_pages_unfill_xarray(pages, start_index, xa_end - 1); in iopt_pages_fill_xarray()
1735 pfn_reader_user_destroy(&user, pages); in iopt_pages_fill_xarray()
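iopt_pages_fill_xarray() above walks the same three storage tiers this file is organized around: the pinned_pfns xarray, any attached domain, and finally the source mm. A userspace read-through-cache sketch of that tiering; arrays stand in for the tiers and the PFN values are fabricated:

    #include <stdio.h>

    #define N 6

    int main(void)
    {
            unsigned long xarray[N] = { [1] = 0x100 };  /* pinned_pfns tier */
            unsigned long domain[N] = { [2] = 0x200 };  /* iommu_domain tier */
            unsigned long out[N];

            for (int i = 0; i < N; i++) {
                    if (xarray[i])
                            out[i] = xarray[i];             /* fast tier hit */
                    else if (domain[i])
                            out[i] = xarray[i] = domain[i]; /* copy across */
                    else
                            out[i] = xarray[i] = 0x900 + i; /* pin from mm */
                    printf("index %d -> pfn 0x%lx\n", i, out[i]);
            }
            return 0;
    }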
1744 static int iopt_pages_rw_slow(struct iopt_pages *pages, in iopt_pages_rw_slow() argument
1753 mutex_lock(&pages->mutex); in iopt_pages_rw_slow()
1755 rc = pfn_reader_first(&pfns, pages, start_index, last_index); in iopt_pages_rw_slow()
1777 mutex_unlock(&pages->mutex); in iopt_pages_rw_slow()
1785 static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, in iopt_pages_rw_page() argument
1792 if (!mmget_not_zero(pages->source_mm)) in iopt_pages_rw_page()
1793 return iopt_pages_rw_slow(pages, index, index, offset, data, in iopt_pages_rw_page()
1801 mmap_read_lock(pages->source_mm); in iopt_pages_rw_page()
1803 pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), in iopt_pages_rw_page()
1806 mmap_read_unlock(pages->source_mm); in iopt_pages_rw_page()
1817 mmput(pages->source_mm); in iopt_pages_rw_page()
1822 * iopt_pages_rw_access - Copy to/from a linear slice of the pages
1823 * @pages: pages to act on
1824 * @start_byte: First byte of pages to copy to/from
1832 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, in iopt_pages_rw_access() argument
1837 bool change_mm = current->mm != pages->source_mm; in iopt_pages_rw_access()
1844 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) in iopt_pages_rw_access()
1849 return iopt_pages_rw_page(pages, start_index, in iopt_pages_rw_access()
1852 return iopt_pages_rw_slow(pages, start_index, last_index, in iopt_pages_rw_access()
1862 if (!mmget_not_zero(pages->source_mm)) in iopt_pages_rw_access()
1863 return iopt_pages_rw_slow(pages, start_index, in iopt_pages_rw_access()
1867 kthread_use_mm(pages->source_mm); in iopt_pages_rw_access()
1871 if (copy_to_user(pages->uptr + start_byte, data, length)) in iopt_pages_rw_access()
1874 if (copy_from_user(data, pages->uptr + start_byte, length)) in iopt_pages_rw_access()
1879 kthread_unuse_mm(pages->source_mm); in iopt_pages_rw_access()
1880 mmput(pages->source_mm); in iopt_pages_rw_access()
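iopt_pages_rw_access() above picks between three strategies. The sketch below models the selection as inferred from these matched lines: callers in a foreign mm that cannot switch take the per-page or pinning paths, everyone else does a direct copy (switching mm via kthread_use_mm() when remote). Treat the exact conditions as an assumption, not a restatement of the source:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the dispatch; the condition structure is inferred from
     * the listing above and may not match the source exactly. */
    static const char *pick_path(bool change_mm, bool kthread, bool one_page)
    {
            if (!kthread && change_mm)
                    return one_page ? "iopt_pages_rw_page fast path"
                                    : "iopt_pages_rw_slow pinning path";
            return "direct copy_to/from_user";  /* kthread_use_mm if remote */
    }

    int main(void)
    {
            printf("%s\n", pick_path(true, false, true));
            printf("%s\n", pick_path(true, false, false));
            printf("%s\n", pick_path(false, false, false));
            return 0;
    }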
1887 iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, in iopt_pages_get_exact_access() argument
1892 lockdep_assert_held(&pages->mutex); in iopt_pages_get_exact_access()
1895 for (node = interval_tree_iter_first(&pages->access_itree, index, last); in iopt_pages_get_exact_access()
1911 * Record that an in-kernel access will be accessing the pages, ensure they are
1920 struct iopt_pages *pages = area->pages; in iopt_area_add_access() local
1924 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) in iopt_area_add_access()
1927 mutex_lock(&pages->mutex); in iopt_area_add_access()
1928 access = iopt_pages_get_exact_access(pages, start_index, last_index); in iopt_area_add_access()
1932 iopt_pages_fill_from_xarray(pages, start_index, last_index, in iopt_area_add_access()
1934 mutex_unlock(&pages->mutex); in iopt_area_add_access()
1944 rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages); in iopt_area_add_access()
1952 interval_tree_insert(&access->node, &pages->access_itree); in iopt_area_add_access()
1953 mutex_unlock(&pages->mutex); in iopt_area_add_access()
1959 mutex_unlock(&pages->mutex); in iopt_area_add_access()
1969 * Undo iopt_area_add_access() and unpin the pages if necessary. The caller
1975 struct iopt_pages *pages = area->pages; in iopt_area_remove_access() local
1978 mutex_lock(&pages->mutex); in iopt_area_remove_access()
1979 access = iopt_pages_get_exact_access(pages, start_index, last_index); in iopt_area_remove_access()
1989 interval_tree_remove(&access->node, &pages->access_itree); in iopt_area_remove_access()
1990 iopt_pages_unfill_xarray(pages, start_index, last_index); in iopt_area_remove_access()
1993 mutex_unlock(&pages->mutex); in iopt_area_remove_access()