Lines matching "scatter gather"

// SPDX-License-Identifier: MIT
#include <linux/dma-mapping.h>
        return (end - start) >> PAGE_SHIFT;     /* in xe_npages_in_range() */
 * xe_mark_range_accessed() - mark a range as accessed, so that the core mm
        npages = xe_npages_in_range(range->start, range->end);
        page = hmm_pfn_to_page(range->hmm_pfns[i]);
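
The loop surrounding these two statements is not part of the excerpt. A minimal sketch of the marking described by the comment above, assuming the signature implied by the kernel-doc and that set_page_dirty_lock()/mark_page_accessed() are the core-mm hooks used:

/* Hedged sketch, not the original source */
static void xe_mark_range_accessed(struct hmm_range *range, bool write)
{
        struct page *page;
        u64 i, npages;

        npages = xe_npages_in_range(range->start, range->end);
        for (i = 0; i < npages; i++) {
                page = hmm_pfn_to_page(range->hmm_pfns[i]);
                if (write)
                        set_page_dirty_lock(page);      /* assumed dirty hook */
                mark_page_accessed(page);               /* assumed accessed hook */
        }
}
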
 * xe_build_sg() - build a scatter gather table for all the physical pages/pfns
 * in a hmm_range. dma-map pages if necessary. The dma-address is saved in the sg table
 * @xe: the xe device that will access the dma-address in the sg table
 * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
 * for system pages. If write, we map it bidirectionally; otherwise
 * the scatter gather table. This is for the purpose of efficiently
 * do a dma-mapping so it can be accessed by GPU/DMA.
 * address), and there is no need for dma-mapping. This is TBD.
 * FIXME: dma-mapping for peer gpu device to access remote gpu's
 * Returns 0 if successful; -ENOMEM if it fails to allocate memory
        struct device *dev = xe->drm.dev;
        npages = xe_npages_in_range(range->start, range->end);
        return -ENOMEM;
        pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
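
The allocation, sg-table construction, and dma-mapping steps between these fragments are not shown. A hedged sketch of that flow, assembled from the fragments and the kernel-doc above; the sg_alloc_table_from_pages_segment()/dma_map_sgtable() calls, the UINT_MAX segment cap, the DMA attribute, and the exact types are assumptions rather than the file's exact code:

/* Hedged sketch, not the original source */
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
                       struct sg_table *st, bool write)
{
        struct device *dev = xe->drm.dev;
        struct page **pages;
        u64 i, npages;
        int ret;

        npages = xe_npages_in_range(range->start, range->end);
        pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        for (i = 0; i < npages; i++)
                pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

        /* contiguous pages collapse into as few sg entries as possible */
        ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0,
                                                npages << PAGE_SHIFT,
                                                UINT_MAX, GFP_KERNEL);
        if (ret)
                goto free_pages;

        /* write decides the map direction, per the kernel-doc above */
        ret = dma_map_sgtable(dev, st,
                              write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
                              DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                sg_free_table(st);

free_pages:
        kvfree(pages);
        return ret;
}
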
 * xe_hmm_userptr_free_sg() - Free the scatter gather table of a userptr
 * @uvma: the userptr vma which holds the scatter gather table
        struct xe_userptr *userptr = &uvma->userptr;
        struct xe_vma *vma = &uvma->vma;
        struct xe_device *xe = vm->xe;
        struct device *dev = xe->drm.dev;
        xe_assert(xe, userptr->sg);
        dma_unmap_sgtable(dev, userptr->sg,
        sg_free_table(userptr->sg);
        userptr->sg = NULL;
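
The dma_unmap_sgtable() call above is truncated before its direction argument. A hedged completion, assuming the direction mirrors the mapping side and that a write flag is derived from an xe_vma_read_only() helper (both assumptions):

        /* Hedged sketch, not the original source */
        bool write = !xe_vma_read_only(vma);    /* assumed helper */

        /* unmap with the same direction the pages were mapped with */
        dma_unmap_sgtable(dev, userptr->sg,
                          write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
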
 * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
        struct xe_vma *vma = &uvma->vma;
        userptr = &uvma->userptr;
        mmap_assert_locked(userptr->notifier.mm);
        if (vma->gpuva.flags & XE_VMA_DESTROYED)
        notifier_seq = mmu_interval_read_begin(&userptr->notifier);
        if (notifier_seq == userptr->notifier_seq)
        if (userptr->sg)
        return -ENOMEM;
        if (!mmget_not_zero(userptr->notifier.mm)) {
                ret = -EFAULT;
        hmm_range.notifier = &userptr->notifier;
        hmm_range.dev_private_owner = vm->xe;
        hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
        mmap_read_lock(userptr->notifier.mm);
        mmap_read_unlock(userptr->notifier.mm);
        if (ret == -EBUSY) {
        mmput(userptr->notifier.mm);
        ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
        userptr->sg = &userptr->sgt;
        userptr->notifier_seq = hmm_range.notifier_seq;
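
Taken together, these fragments outline a fault-and-retry pattern: take a notifier sequence, fault the range under mmap_read_lock(), and retry while hmm_range_fault() returns -EBUSY until a timeout. A minimal sketch of that loop, assuming the timeout and is_mm_mmap_locked variables are declared in the elided parts of the function; this is an illustration, not the file's exact code:

        /* Hedged sketch of the fault-and-retry loop, not the original source */
        while (true) {
                hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);

                if (!is_mm_mmap_locked)
                        mmap_read_lock(userptr->notifier.mm);

                ret = hmm_range_fault(&hmm_range);

                if (!is_mm_mmap_locked)
                        mmap_read_unlock(userptr->notifier.mm);

                if (ret == -EBUSY) {
                        /* the range changed under us; retry until the timeout */
                        if (time_after(jiffies, timeout))
                                break;
                        continue;
                }
                break;
        }

        mmput(userptr->notifier.mm);

Recording hmm_range.notifier_seq at the end (last fragment above) is what lets later validation detect concurrent invalidations, typically by checking it against the notifier with mmu_interval_read_retry().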