Lines Matching +full:entry +full:- +full:address
1 // SPDX-License-Identifier: GPL-2.0
In map_pte():
 20  if (pvmw->flags & PVMW_SYNC) {
 22          pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
 23                                          pvmw->address, &pvmw->ptl);
 24          *ptlp = pvmw->ptl;
 25          return !!pvmw->pte;
 30   * in case *pvmw->pmd changes underneath us; so we need to
 35  pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
 36                                    pvmw->address, ptlp);
 37  if (!pvmw->pte)
 40  ptent = ptep_get(pvmw->pte);
 42  if (pvmw->flags & PVMW_MIGRATION) {
 46          swp_entry_t entry;
 48           * Handle un-addressable ZONE_DEVICE memory.
 51           * device page from the process address space. Such
 53           * a special swap entry, nonetheless it still does
 63          entry = pte_to_swp_entry(ptent);
 64          if (!is_device_private_entry(entry) &&
 65              !is_device_exclusive_entry(entry))
 70  pvmw->ptl = *ptlp;
 71  spin_lock(pvmw->ptl);
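The comment fragments at lines 48-53 describe the special case these checks implement: a device-private page is not CPU-accessible and sits behind a special swap entry, yet it still counts as a valid mapping, so map_pte() takes the PTE lock and reports it. As a rough sketch of the non-migration classification (illustrative only, not mm/page_vma_mapped.c itself; the helper name pte_counts_as_mapping() is invented, the other helpers are the usual ones from include/linux/swapops.h):

#include <linux/pgtable.h>	/* pte_present() */
#include <linux/swapops.h>	/* is_swap_pte(), pte_to_swp_entry(), ... */

/* Sketch only: does this PTE still count as mapping a page? */
static bool pte_counts_as_mapping(pte_t ptent)
{
	swp_entry_t entry;

	if (pte_present(ptent))
		return true;		/* ordinary, CPU-accessible mapping */
	if (!is_swap_pte(ptent))
		return false;		/* pte_none() or otherwise not a swap entry */

	entry = pte_to_swp_entry(ptent);
	/*
	 * Device-private / device-exclusive entries are not CPU-accessible,
	 * but they are still accounted as valid mappings of the page, so
	 * the walk must take the PTE lock and report them.
	 */
	return is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry);
}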
In check_pte():
 76   * check_pte - check if [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 77   * mapped at the @pvmw->pte
 84   * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 87   * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a migration
 88   * entry that points to [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 90   * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 91   * [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 99  pte_t ptent = ptep_get(pvmw->pte);
101  if (pvmw->flags & PVMW_MIGRATION) {
102          swp_entry_t entry;
105          entry = pte_to_swp_entry(ptent);
107          if (!is_migration_entry(entry) &&
108              !is_device_exclusive_entry(entry))
111          pfn = swp_offset_pfn(entry);
113          swp_entry_t entry;
115          /* Handle un-addressable ZONE_DEVICE memory */
116          entry = pte_to_swp_entry(ptent);
117          if (!is_device_private_entry(entry) &&
118              !is_device_exclusive_entry(entry))
121          pfn = swp_offset_pfn(entry);
129  return (pfn - pvmw->pfn) < pvmw->nr_pages;
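The final test at line 129 is the usual unsigned-wraparound range check: one comparison covers both pfn >= pvmw->pfn and pfn < pvmw->pfn + nr_pages, because a pfn below the base wraps the subtraction around to a huge value. A standalone userspace illustration of the same idiom (names invented for the demo):

#include <stdbool.h>
#include <stdio.h>

/* Same idiom as check_pte()'s final test: a single unsigned compare
 * covers both bounds, because (pfn - base) wraps when pfn < base. */
static bool pfn_in_range(unsigned long pfn, unsigned long base,
			 unsigned long nr_pages)
{
	return (pfn - base) < nr_pages;
}

int main(void)
{
	printf("%d\n", pfn_in_range(0x1000, 0x1000, 4));	/* first pfn in range  */
	printf("%d\n", pfn_in_range(0x1003, 0x1000, 4));	/* last pfn in range   */
	printf("%d\n", pfn_in_range(0x0fff, 0x1000, 4));	/* below: wraps, false */
	printf("%d\n", pfn_in_range(0x1004, 0x1000, 4));	/* above: false        */
	return 0;
}

Compiled with any C compiler, this prints 1, 1, 0, 0.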
In check_pmd():
135  if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
137  if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
In step_forward():
144  pvmw->address = (pvmw->address + size) & ~(size - 1);
145  if (!pvmw->address)
146          pvmw->address = ULONG_MAX;
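Lines 144-146 advance the walk to the first address of the next size-aligned block: with size a power of two, (address + size) & ~(size - 1) always moves forward, and the !pvmw->address test catches wrap-around past the top of the address space, clamping to ULONG_MAX so a caller looping on address < end still terminates. A standalone demo of the same arithmetic (not kernel code):

#include <limits.h>
#include <stdio.h>

/* Same arithmetic as step_forward(): jump to the start of the next
 * size-aligned block (size must be a power of two); if the addition
 * wraps past the top of the address space, clamp to ULONG_MAX. */
static unsigned long next_block(unsigned long address, unsigned long size)
{
	address = (address + size) & ~(size - 1);
	if (!address)
		address = ULONG_MAX;
	return address;
}

int main(void)
{
	printf("%#lx\n", next_block(0x1234, 0x1000));		/* 0x2000 */
	printf("%#lx\n", next_block(0x2000, 0x1000));		/* 0x3000: always advances */
	printf("%#lx\n", next_block(ULONG_MAX - 1, 0x1000));	/* wraps, clamped to ULONG_MAX */
	return 0;
}

This prints 0x2000, 0x3000 and then 0xffffffffffffffff on a 64-bit build.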
In page_vma_mapped_walk() (kernel-doc):
150  * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
151  * @pvmw->address
152  * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address and flags
155  * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
156  * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
157  * adjusted if needed (for PTE-mapped THPs).
159  * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
160  * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
163  * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
164  * regardless of which page table level the page is mapped at. @pvmw->pmd is
168  * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
In page_vma_mapped_walk() (body):
175  struct vm_area_struct *vma = pvmw->vma;
176  struct mm_struct *mm = vma->vm_mm;
185  if (pvmw->pmd && !pvmw->pte)
192  if (pvmw->pte)
199  pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
200  if (!pvmw->pte)
203  pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
210  if (pvmw->pte)
214  pgd = pgd_offset(mm, pvmw->address);
219  p4d = p4d_offset(pgd, pvmw->address);
224  pud = pud_offset(p4d, pvmw->address);
230  pvmw->pmd = pmd_offset(pud, pvmw->address);
236  pmde = pmdp_get_lockless(pvmw->pmd);
240  pvmw->ptl = pmd_lock(mm, pvmw->pmd);
241  pmde = *pvmw->pmd;
243  swp_entry_t entry;
246      !(pvmw->flags & PVMW_MIGRATION))
248  entry = pmd_to_swp_entry(pmde);
249  if (!is_migration_entry(entry) ||
250      !check_pmd(swp_offset_pfn(entry), pvmw))
255  if (pvmw->flags & PVMW_MIGRATION)
262  spin_unlock(pvmw->ptl);
263  pvmw->ptl = NULL;
270  if ((pvmw->flags & PVMW_SYNC) &&
271      thp_vma_suitable_order(vma, pvmw->address,
273      (pvmw->nr_pages >= HPAGE_PMD_NR)) {
274  spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
282  if (!pvmw->pte)
291  pvmw->address += PAGE_SIZE;
292  if (pvmw->address >= end)
295  if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
296  if (pvmw->ptl) {
297  spin_unlock(pvmw->ptl);
298  pvmw->ptl = NULL;
300  pte_unmap(pvmw->pte);
301  pvmw->pte = NULL;
304  pvmw->pte++;
305  } while (pte_none(ptep_get(pvmw->pte)));
307  if (!pvmw->ptl) {
308  pvmw->ptl = ptl;
309  spin_lock(pvmw->ptl);
312  } while (pvmw->address < end);
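The kernel-doc above spells out the calling convention: fill in pfn, nr_pages, vma, address and flags, leave pmd, pte and ptl NULL, and call page_vma_mapped_walk() in a loop. A hedged sketch of a typical caller, modelled on the rmap walkers and assuming the DEFINE_FOLIO_VMA_WALK() initialiser and page_vma_mapped_walk_done() helper from include/linux/rmap.h (for_each_mapping_of_folio() is an invented name):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: visit every PTE/PMD in @vma that maps @folio at/after @address. */
static void for_each_mapping_of_folio(struct folio *folio,
				      struct vm_area_struct *vma,
				      unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/* PMD-mapped THP: pvmw.pmd is valid, pvmw.ptl is held */
			continue;
		}
		/*
		 * PTE mapping: pvmw.pte is mapped, pvmw.ptl is held, and
		 * pvmw.address points at the page this PTE maps.
		 */
	}
	/*
	 * A false return means no more entries: the lock has been dropped
	 * and the PTE unmapped.  To bail out of the loop early instead,
	 * call page_vma_mapped_walk_done(&pvmw) before breaking.
	 */
}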
In page_mapped_in_vma():
319   * page_mapped_in_vma - check whether a page is really mapped in a VMA
323   * Return: The address the page is mapped at if the page is in the range
325   * outside the VMA or not present, returns -EFAULT.
331  pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
339  pvmw.address = vma_address(vma, pgoff, 1);
340  if (pvmw.address == -EFAULT)
343  return -EFAULT;
346  return pvmw.address;
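Per the Return: convention quoted at lines 323-325, the function hands back either the user virtual address the page is mapped at or -EFAULT. A minimal caller sketch, assuming the declaration in include/linux/rmap.h (page_is_mapped_here() is an invented wrapper, shown only to illustrate the return contract):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: report whether @page is currently mapped anywhere in @vma. */
static bool page_is_mapped_here(struct page *page, struct vm_area_struct *vma)
{
	unsigned long addr = page_mapped_in_vma(page, vma);

	if (addr == -EFAULT)
		return false;	/* outside the VMA, or not present */

	/* addr is the user virtual address at which @page is mapped. */
	return true;
}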