Lines Matching full:pages
24 struct iopt_pages *pages; member
43 if (!iter->area->pages) { in iopt_area_contig_init()
66 !iter->area->pages) { in iopt_area_contig_next()
196 * The area takes a slice of the pages from start_byte to start_byte + length
199 struct iopt_pages *pages, unsigned long iova, in iopt_insert_area() argument
205 if ((iommu_prot & IOMMU_WRITE) && !pages->writable) in iopt_insert_area()
221 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
225 * The area is inserted with a NULL pages indicating it is not fully in iopt_insert_area()
272 (uintptr_t)elm->pages->uptr + elm->start_byte, length); in iopt_alloc_area_pages()
287 * Areas are created with a NULL pages so that the IOVA space is in iopt_alloc_area_pages()
292 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, in iopt_alloc_area_pages()
307 WARN_ON(area->pages); in iopt_abort_area()
324 if (elm->pages) in iopt_free_pages_list()
325 iopt_put_pages(elm->pages); in iopt_free_pages_list()
338 rc = iopt_area_fill_domains(elm->area, elm->pages); in iopt_fill_domains_pages()
348 iopt_area_unfill_domains(undo_elm->area, undo_elm->pages); in iopt_fill_domains_pages()
373 * area->pages must be set inside the domains_rwsem to ensure in iopt_map_pages()
377 elm->area->pages = elm->pages; in iopt_map_pages()
378 elm->pages = NULL; in iopt_map_pages()
399 * page tables this will pin the pages and load them into the domain at iova.
415 elm.pages = iopt_alloc_pages(uptr, length, iommu_prot & IOMMU_WRITE); in iopt_map_user_pages()
416 if (IS_ERR(elm.pages)) in iopt_map_user_pages()
417 return PTR_ERR(elm.pages); in iopt_map_user_pages()
419 elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER) in iopt_map_user_pages()
420 elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM; in iopt_map_user_pages()
421 elm.start_byte = uptr - elm.pages->uptr; in iopt_map_user_pages()
429 if (elm.pages) in iopt_map_user_pages()
430 iopt_put_pages(elm.pages); in iopt_map_user_pages()
568 if (!area->pages) in iopt_clear_dirty_data()
631 elm->pages = area->pages; in iopt_get_pages()
633 kref_get(&elm->pages->kref); in iopt_get_pages()
657 * The domains_rwsem must be held in read mode any time any area->pages in iopt_unmap_iova_range()
667 struct iopt_pages *pages; in iopt_unmap_iova_range() local
670 if (!area->pages) { in iopt_unmap_iova_range()
686 * without the pages->mutex. in iopt_unmap_iova_range()
704 pages = area->pages; in iopt_unmap_iova_range()
705 area->pages = NULL; in iopt_unmap_iova_range()
708 iopt_area_unfill_domains(area, pages); in iopt_unmap_iova_range()
710 iopt_put_pages(pages); in iopt_unmap_iova_range()
895 struct iopt_pages *pages = area->pages; in iopt_unfill_domain() local
897 if (!pages) in iopt_unfill_domain()
900 mutex_lock(&pages->mutex); in iopt_unfill_domain()
905 mutex_unlock(&pages->mutex); in iopt_unfill_domain()
914 struct iopt_pages *pages = area->pages; in iopt_unfill_domain() local
916 if (!pages) in iopt_unfill_domain()
919 mutex_lock(&pages->mutex); in iopt_unfill_domain()
920 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_unfill_domain()
923 iopt_area_unfill_domain(area, pages, domain); in iopt_unfill_domain()
924 mutex_unlock(&pages->mutex); in iopt_unfill_domain()
948 struct iopt_pages *pages = area->pages; in iopt_fill_domain() local
950 if (!pages) in iopt_fill_domain()
953 mutex_lock(&pages->mutex); in iopt_fill_domain()
956 mutex_unlock(&pages->mutex); in iopt_fill_domain()
963 &pages->domains_itree); in iopt_fill_domain()
965 mutex_unlock(&pages->mutex); in iopt_fill_domain()
973 struct iopt_pages *pages = area->pages; in iopt_fill_domain() local
977 if (!pages) in iopt_fill_domain()
979 mutex_lock(&pages->mutex); in iopt_fill_domain()
982 &pages->domains_itree); in iopt_fill_domain()
985 iopt_area_unfill_domain(area, pages, domain); in iopt_fill_domain()
986 mutex_unlock(&pages->mutex); in iopt_fill_domain()
1183 struct iopt_pages *pages = area->pages; in iopt_area_split() local
1193 if (!pages || area->prevent_access) in iopt_area_split()
1210 mutex_lock(&pages->mutex); in iopt_area_split()
1222 * huge pages. in iopt_area_split()
1230 rc = iopt_insert_area(iopt, lhs, area->pages, start_iova, in iopt_area_split()
1237 rc = iopt_insert_area(iopt, rhs, area->pages, new_start, in iopt_area_split()
1248 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_split()
1249 interval_tree_insert(&lhs->pages_node, &pages->domains_itree); in iopt_area_split()
1250 interval_tree_insert(&rhs->pages_node, &pages->domains_itree); in iopt_area_split()
1254 lhs->pages = area->pages; in iopt_area_split()
1256 rhs->pages = area->pages; in iopt_area_split()
1257 kref_get(&rhs->pages->kref); in iopt_area_split()
1259 mutex_unlock(&pages->mutex); in iopt_area_split()
1262 * No change to domains or accesses because the pages hasn't been in iopt_area_split()
1272 mutex_unlock(&pages->mutex); in iopt_area_split()
1322 /* Won't do it if domains already have pages mapped in them */ in iopt_disable_large_pages()
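
Taken together, the matches above trace a consistent lifecycle for area->pages: an area is inserted with a NULL pages pointer, the pointer is only set once the area is populated (under domains_rwsem), splitting an area makes both halves share the same pages and takes an extra kref, and unmapping clears the pointer before dropping the reference with iopt_put_pages(). The userspace sketch below models only that reference-counting discipline, using hypothetical model_* names; it is not the kernel implementation and deliberately omits the domains_rwsem and pages->mutex locking the matches also show.

/*
 * Userspace model of the iopt_pages reference-counting pattern suggested by
 * the matches above. All model_* names are hypothetical; this is not kernel
 * code, only an illustration of the area->pages lifecycle.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct model_pages {
	int kref;			/* stands in for struct kref */
};

struct model_area {
	struct model_pages *pages;	/* NULL until the area is populated */
};

static struct model_pages *model_pages_alloc(void)
{
	struct model_pages *pages = calloc(1, sizeof(*pages));

	if (pages)
		pages->kref = 1;
	return pages;
}

static void model_put_pages(struct model_pages *pages)
{
	if (--pages->kref == 0)
		free(pages);
}

/* Populate: the area was inserted with NULL pages, now take ownership */
static void model_area_populate(struct model_area *area,
				struct model_pages *pages)
{
	area->pages = pages;
}

/* Split: both halves share the same pages, so the second half takes a ref */
static void model_area_split(struct model_area *lhs, struct model_area *rhs,
			     struct model_area *area)
{
	lhs->pages = area->pages;
	rhs->pages = area->pages;
	rhs->pages->kref++;	/* mirrors kref_get(&rhs->pages->kref) */
	area->pages = NULL;	/* lhs inherits the original reference */
}

/* Unmap: detach the pages from the area first, then drop the reference */
static void model_area_unmap(struct model_area *area)
{
	struct model_pages *pages = area->pages;

	area->pages = NULL;
	model_put_pages(pages);
}

int main(void)
{
	struct model_pages *pages = model_pages_alloc();
	struct model_area area = { 0 }, lhs = { 0 }, rhs = { 0 };

	assert(pages);
	model_area_populate(&area, pages);
	model_area_split(&lhs, &rhs, &area);
	model_area_unmap(&lhs);
	model_area_unmap(&rhs);
	printf("pages lifecycle model ran without leaking a reference\n");
	return 0;
}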