Lines matching "4 kb - page" in fs/proc/task_mmu.c
1 // SPDX-License-Identifier: GPL-2.0
33 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
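
The shift in SEQ_PUT_DEC converts a page count to kilobytes: PAGE_SHIFT is log2(PAGE_SIZE), so shifting left by PAGE_SHIFT - 10 multiplies by PAGE_SIZE / 1024. A minimal stand-alone sketch of the same arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the helper name is hypothetical:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages; the kernel value is per-arch */

/* hypothetical helper mirroring SEQ_PUT_DEC's (val) << (PAGE_SHIFT - 10) */
static unsigned long long pages_to_kb(unsigned long long pages)
{
	return pages << (PAGE_SHIFT - 10);	/* == pages * (PAGE_SIZE / 1024) */
}

int main(void)
{
	printf("%llu kB\n", pages_to_kb(300));	/* 300 pages -> 1200 kB */
	return 0;
}
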
50 hiwater_vm = total_vm = mm->total_vm; in task_mem()
51 if (hiwater_vm < mm->hiwater_vm) in task_mem()
52 hiwater_vm = mm->hiwater_vm; in task_mem()
54 if (hiwater_rss < mm->hiwater_rss) in task_mem()
55 hiwater_rss = mm->hiwater_rss; in task_mem()
58 text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK); in task_mem()
59 text = min(text, mm->exec_vm << PAGE_SHIFT); in task_mem()
60 lib = (mm->exec_vm << PAGE_SHIFT) - text; in task_mem()
64 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); in task_mem()
65 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); in task_mem()
66 SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm)); in task_mem()
67 SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss); in task_mem()
68 SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss); in task_mem()
69 SEQ_PUT_DEC(" kB\nRssAnon:\t", anon); in task_mem()
70 SEQ_PUT_DEC(" kB\nRssFile:\t", file); in task_mem()
71 SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem); in task_mem()
72 SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm); in task_mem()
73 SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm); in task_mem()
75 " kB\nVmExe:\t", text >> 10, 8); in task_mem()
77 " kB\nVmLib:\t", lib >> 10, 8); in task_mem()
79 " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); in task_mem()
80 SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); in task_mem()
81 seq_puts(m, " kB\n"); in task_mem()
88 return PAGE_SIZE * mm->total_vm; in task_vsize()
97 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) in task_statm()
99 *data = mm->data_vm + mm->stack_vm; in task_statm()
101 return mm->total_vm; in task_statm()
110 struct task_struct *task = priv->task; in hold_task_mempolicy()
113 priv->task_mempolicy = get_task_policy(task); in hold_task_mempolicy()
114 mpol_get(priv->task_mempolicy); in hold_task_mempolicy()
119 mpol_put(priv->task_mempolicy); in release_task_mempolicy()
133 struct vm_area_struct *vma = vma_next(&priv->iter); in proc_get_vma()
136 *ppos = vma->vm_start; in proc_get_vma()
138 *ppos = -2UL; in proc_get_vma()
139 vma = get_gate_vma(priv->mm); in proc_get_vma()
147 struct proc_maps_private *priv = m->private; in m_start()
152 if (last_addr == -1UL) in m_start()
155 priv->task = get_proc_task(priv->inode); in m_start()
156 if (!priv->task) in m_start()
157 return ERR_PTR(-ESRCH); in m_start()
159 mm = priv->mm; in m_start()
161 put_task_struct(priv->task); in m_start()
162 priv->task = NULL; in m_start()
168 put_task_struct(priv->task); in m_start()
169 priv->task = NULL; in m_start()
170 return ERR_PTR(-EINTR); in m_start()
173 vma_iter_init(&priv->iter, mm, last_addr); in m_start()
175 if (last_addr == -2UL) in m_start()
183 if (*ppos == -2UL) { in m_next()
184 *ppos = -1UL; in m_next()
187 return proc_get_vma(m->private, ppos); in m_next()
192 struct proc_maps_private *priv = m->private; in m_stop()
193 struct mm_struct *mm = priv->mm; in m_stop()
195 if (!priv->task) in m_stop()
201 put_task_struct(priv->task); in m_stop()
202 priv->task = NULL; in m_stop()
211 return -ENOMEM; in proc_maps_open()
213 priv->inode = inode; in proc_maps_open()
214 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in proc_maps_open()
215 if (IS_ERR(priv->mm)) { in proc_maps_open()
216 int err = PTR_ERR(priv->mm); in proc_maps_open()
227 struct seq_file *seq = file->private_data; in proc_map_release()
228 struct proc_maps_private *priv = seq->private; in proc_map_release()
230 if (priv->mm) in proc_map_release()
231 mmdrop(priv->mm); in proc_map_release()
248 struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL; in get_vma_name()
258 if (vma->vm_file) { in get_vma_name()
265 *name = anon_name->name; in get_vma_name()
267 *path = file_user_path(vma->vm_file); in get_vma_name()
272 if (vma->vm_ops && vma->vm_ops->name) { in get_vma_name()
273 *name = vma->vm_ops->name(vma); in get_vma_name()
282 if (!vma->vm_mm) { in get_vma_name()
299 *name = anon_name->name; in get_vma_name()
309 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); in show_vma_header_prefix()
311 seq_put_hex_ll(m, "-", end, 8); in show_vma_header_prefix()
313 seq_putc(m, flags & VM_READ ? 'r' : '-'); in show_vma_header_prefix()
314 seq_putc(m, flags & VM_WRITE ? 'w' : '-'); in show_vma_header_prefix()
315 seq_putc(m, flags & VM_EXEC ? 'x' : '-'); in show_vma_header_prefix()
329 vm_flags_t flags = vma->vm_flags; in show_map_vma()
335 if (vma->vm_file) { in show_map_vma()
336 const struct inode *inode = file_user_inode(vma->vm_file); in show_map_vma()
338 dev = inode->i_sb->s_dev; in show_map_vma()
339 ino = inode->i_ino; in show_map_vma()
340 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
343 start = vma->vm_start; in show_map_vma()
344 end = vma->vm_end; in show_map_vma()
417 /* user requested only file-backed VMA, keep iterating */ in query_matching_vma()
418 if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file) in query_matching_vma()
434 if ((vma->vm_flags & perm) != perm) in query_matching_vma()
439 if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr) in query_matching_vma()
446 addr = vma->vm_end; in query_matching_vma()
451 return ERR_PTR(-ENOENT); in query_matching_vma()
465 return -EFAULT; in do_procmap_query()
468 return -E2BIG; in do_procmap_query()
471 return -EINVAL; in do_procmap_query()
478 return -EINVAL; in do_procmap_query()
481 return -EINVAL; in do_procmap_query()
483 return -EINVAL; in do_procmap_query()
485 mm = priv->mm; in do_procmap_query()
487 return -ESRCH; in do_procmap_query()
502 karg.vma_start = vma->vm_start; in do_procmap_query()
503 karg.vma_end = vma->vm_end; in do_procmap_query()
506 if (vma->vm_flags & VM_READ) in do_procmap_query()
508 if (vma->vm_flags & VM_WRITE) in do_procmap_query()
510 if (vma->vm_flags & VM_EXEC) in do_procmap_query()
512 if (vma->vm_flags & VM_MAYSHARE) in do_procmap_query()
517 if (vma->vm_file) { in do_procmap_query()
518 const struct inode *inode = file_user_inode(vma->vm_file); in do_procmap_query()
520 karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT; in do_procmap_query()
521 karg.dev_major = MAJOR(inode->i_sb->s_dev); in do_procmap_query()
522 karg.dev_minor = MINOR(inode->i_sb->s_dev); in do_procmap_query()
523 karg.inode = inode->i_ino; in do_procmap_query()
539 err = -ENAMETOOLONG; in do_procmap_query()
557 err = -ENOMEM; in do_procmap_query()
567 name_sz = name_buf + name_buf_sz - name; in do_procmap_query()
573 err = -ENAMETOOLONG; in do_procmap_query()
586 return -EFAULT; in do_procmap_query()
592 return -EFAULT; in do_procmap_query()
595 return -EFAULT; in do_procmap_query()
608 struct seq_file *seq = file->private_data; in procfs_procmap_ioctl()
609 struct proc_maps_private *priv = seq->private; in procfs_procmap_ioctl()
615 return -ENOIOCTLCMD; in procfs_procmap_ioctl()
632 * page is divided by the number of processes sharing it. So if a
637 * fixed-point pss counter to minimize division errors. So (pss >>
640 * A shift of 12 before division means (assuming 4K page size):
641 * - 1M 3-user-pages add up to 8KB errors;
642 * - supports mapcount up to 2^24, or 16M;
643 * - supports PSS up to 2^52 bytes, or 4PB.
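
A rough user-space illustration of the fixed-point scheme described above (a sketch, not the kernel's code path): each page is charged size/mapcount in PSS_SHIFT fixed point, and the sum is only shifted back down when reported, as smaps_page_accumulate() and __show_smap() do below. The page size and per-page map counts are assumed values:

#include <stdint.h>
#include <stdio.h>

#define PSS_SHIFT 12		/* the shift of 12 discussed above */
#define PAGE_SIZE 4096ULL	/* assumed 4 KiB pages */

int main(void)
{
	int mapcounts[] = { 1, 2, 3 };	/* hypothetical per-page map counts */
	uint64_t pss = 0;
	int i;

	for (i = 0; i < 3; i++)
		/* each page contributes size/mapcount, kept in fixed point */
		pss += (PAGE_SIZE << PSS_SHIFT) / mapcounts[i];

	/* shift back down only when reporting, like "Pss:" in smaps */
	printf("Pss: %llu kB\n",
	       (unsigned long long)((pss >> PSS_SHIFT) >> 10));
	return 0;
}
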
677 mss->pss += pss; in smaps_page_accumulate()
680 mss->pss_anon += pss; in smaps_page_accumulate()
682 mss->pss_shmem += pss; in smaps_page_accumulate()
684 mss->pss_file += pss; in smaps_page_accumulate()
687 mss->pss_locked += pss; in smaps_page_accumulate()
690 mss->pss_dirty += pss; in smaps_page_accumulate()
692 mss->private_dirty += size; in smaps_page_accumulate()
694 mss->shared_dirty += size; in smaps_page_accumulate()
697 mss->private_clean += size; in smaps_page_accumulate()
699 mss->shared_clean += size; in smaps_page_accumulate()
703 static void smaps_account(struct mem_size_stats *mss, struct page *page, in smaps_account() argument
707 struct folio *folio = page_folio(page); in smaps_account()
708 int i, nr = compound ? compound_nr(page) : 1; in smaps_account()
713 * of the compound page. in smaps_account()
716 mss->anonymous += size; in smaps_account()
719 mss->lazyfree += size; in smaps_account()
723 mss->ksm += size; in smaps_account()
725 mss->resident += size; in smaps_account()
728 mss->referenced += size; in smaps_account()
732 * differ page-by-page. in smaps_account()
738 * Treat all non-present entries (where relying on the mapcount and in smaps_account()
740 * often". We treat device private entries as being fake-present. in smaps_account()
755 for (i = 0; i < nr; i++, page++) { in smaps_account()
756 int mapcount = folio_precise_page_mapcount(folio, page); in smaps_account()
769 struct mem_size_stats *mss = walk->private; in smaps_pte_hole()
770 struct vm_area_struct *vma = walk->vma; in smaps_pte_hole()
772 mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping, in smaps_pte_hole()
785 if (walk->ops->pte_hole) { in smaps_pte_hole_lookup()
795 struct mem_size_stats *mss = walk->private; in smaps_pte_entry()
796 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry()
797 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
798 struct page *page = NULL; in smaps_pte_entry() local
803 page = vm_normal_page(vma, addr, ptent); in smaps_pte_entry()
813 mss->swap += PAGE_SIZE; in smaps_pte_entry()
819 mss->swap_pss += pss_delta; in smaps_pte_entry()
821 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; in smaps_pte_entry()
826 page = pfn_swap_entry_to_page(swpent); in smaps_pte_entry()
833 if (!page) in smaps_pte_entry()
836 smaps_account(mss, page, false, young, dirty, locked, present); in smaps_pte_entry()
843 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry()
844 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry()
845 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
846 struct page *page = NULL; in smaps_pmd_entry() local
851 page = vm_normal_page_pmd(vma, addr, *pmd); in smaps_pmd_entry()
857 page = pfn_swap_entry_to_page(entry); in smaps_pmd_entry()
859 if (IS_ERR_OR_NULL(page)) in smaps_pmd_entry()
861 folio = page_folio(page); in smaps_pmd_entry()
863 mss->anonymous_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
865 mss->shmem_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
869 mss->file_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
871 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), in smaps_pmd_entry()
884 struct vm_area_struct *vma = walk->vma; in smaps_pte_range()
895 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
897 walk->action = ACTION_AGAIN; in smaps_pte_range()
902 pte_unmap_unlock(pte - 1, ptl); in smaps_pte_range()
917 * -Werror=unterminated-string-initialization warning in show_smap_vma_flags()
924 [0 ... (BITS_PER_LONG-1)] = "??", in show_smap_vma_flags()
997 if (vma->vm_flags & (1UL << i)) in show_smap_vma_flags()
1008 struct mem_size_stats *mss = walk->private; in smaps_hugetlb_range()
1009 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range()
1010 pte_t ptent = huge_ptep_get(walk->mm, addr, pte); in smaps_hugetlb_range()
1025 /* We treat non-present entries as "maybe shared". */ in smaps_hugetlb_range()
1028 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
1030 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
1063 if (start >= vma->vm_end) in smap_gather_stats()
1066 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
1073 * object, so we have to distinguish them during the page walk. in smap_gather_stats()
1079 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
1080 !(vma->vm_flags & VM_WRITE))) { in smap_gather_stats()
1081 mss->swap += shmem_swapped; in smap_gather_stats()
1091 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
1101 SEQ_PUT_DEC("Rss: ", mss->resident); in __show_smap()
1102 SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT); in __show_smap()
1103 SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT); in __show_smap()
1109 SEQ_PUT_DEC(" kB\nPss_Anon: ", in __show_smap()
1110 mss->pss_anon >> PSS_SHIFT); in __show_smap()
1111 SEQ_PUT_DEC(" kB\nPss_File: ", in __show_smap()
1112 mss->pss_file >> PSS_SHIFT); in __show_smap()
1113 SEQ_PUT_DEC(" kB\nPss_Shmem: ", in __show_smap()
1114 mss->pss_shmem >> PSS_SHIFT); in __show_smap()
1116 SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean); in __show_smap()
1117 SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty); in __show_smap()
1118 SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean); in __show_smap()
1119 SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty); in __show_smap()
1120 SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced); in __show_smap()
1121 SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous); in __show_smap()
1122 SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm); in __show_smap()
1123 SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); in __show_smap()
1124 SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); in __show_smap()
1125 SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); in __show_smap()
1126 SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); in __show_smap()
1127 SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); in __show_smap()
1128 seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", in __show_smap()
1129 mss->private_hugetlb >> 10, 7); in __show_smap()
1130 SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); in __show_smap()
1131 SEQ_PUT_DEC(" kB\nSwapPss: ", in __show_smap()
1132 mss->swap_pss >> PSS_SHIFT); in __show_smap()
1133 SEQ_PUT_DEC(" kB\nLocked: ", in __show_smap()
1134 mss->pss_locked >> PSS_SHIFT); in __show_smap()
1135 seq_puts(m, " kB\n"); in __show_smap()
1147 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
1148 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
1149 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
1150 seq_puts(m, " kB\n"); in show_smap()
1155 !!thp_vma_allowable_orders(vma, vma->vm_flags, in show_smap()
1167 struct proc_maps_private *priv = m->private; in show_smaps_rollup()
1169 struct mm_struct *mm = priv->mm; in show_smaps_rollup()
1175 priv->task = get_proc_task(priv->inode); in show_smaps_rollup()
1176 if (!priv->task) in show_smaps_rollup()
1177 return -ESRCH; in show_smaps_rollup()
1180 ret = -ESRCH; in show_smaps_rollup()
1194 vma_start = vma->vm_start; in show_smaps_rollup()
1197 last_vma_end = vma->vm_end; in show_smaps_rollup()
1216 * +------+------+-----------+ in show_smaps_rollup()
1218 * +------+------+-----------+ in show_smaps_rollup()
1220 * 4k 8k 16k 400k in show_smaps_rollup()
1242 * 4) (last_vma_end - 1) is the middle of a vma (VMA'): in show_smaps_rollup()
1254 if (vma->vm_start >= last_vma_end) { in show_smaps_rollup()
1256 last_vma_end = vma->vm_end; in show_smaps_rollup()
1260 /* Case 4 above */ in show_smaps_rollup()
1261 if (vma->vm_end > last_vma_end) { in show_smaps_rollup()
1263 last_vma_end = vma->vm_end; in show_smaps_rollup()
1281 put_task_struct(priv->task); in show_smaps_rollup()
1282 priv->task = NULL; in show_smaps_rollup()
1307 return -ENOMEM; in smaps_rollup_open()
1313 priv->inode = inode; in smaps_rollup_open()
1314 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in smaps_rollup_open()
1315 if (IS_ERR(priv->mm)) { in smaps_rollup_open()
1316 ret = PTR_ERR(priv->mm); in smaps_rollup_open()
1331 struct seq_file *seq = file->private_data; in smaps_rollup_release()
1332 struct proc_maps_private *priv = seq->private; in smaps_rollup_release()
1334 if (priv->mm) in smaps_rollup_release()
1335 mmdrop(priv->mm); in smaps_rollup_release()
1376 if (!is_cow_mapping(vma->vm_flags)) in pte_is_pinned()
1378 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))) in pte_is_pinned()
1390 * The soft-dirty tracker uses #PF-s to catch writes in clear_soft_dirty()
1391 * to pages, so write-protect the pte as well. See the in clear_soft_dirty()
1392 * Documentation/admin-guide/mm/soft-dirty.rst for full description in clear_soft_dirty()
1393 * of how soft-dirty works. in clear_soft_dirty()
1408 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1435 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1438 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1451 struct clear_refs_private *cp = walk->private; in clear_refs_pte_range()
1452 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range()
1459 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1478 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1480 walk->action = ACTION_AGAIN; in clear_refs_pte_range()
1486 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1503 pte_unmap_unlock(pte - 1, ptl); in clear_refs_pte_range()
1511 struct clear_refs_private *cp = walk->private; in clear_refs_test_walk()
1512 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk()
1514 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1521 * Writing 4 to /proc/pid/clear_refs affects all pages. in clear_refs_test_walk()
1523 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1525 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1547 if (count > sizeof(buffer) - 1) in clear_refs_write()
1548 count = sizeof(buffer) - 1; in clear_refs_write()
1550 return -EFAULT; in clear_refs_write()
1556 return -EINVAL; in clear_refs_write()
1560 return -ESRCH; in clear_refs_write()
1570 count = -EINTR; in clear_refs_write()
1584 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1592 0, mm, 0, -1UL); in clear_refs_write()
1595 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); in clear_refs_write()
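
The clear_refs fragments above implement /proc/pid/clear_refs; per the comments around clear_soft_dirty() and clear_refs_test_walk(), writing 4 resets the soft-dirty bits on all pages so later writes can be caught again (see Documentation/admin-guide/mm/soft-dirty.rst). A minimal user-space sketch, error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "4" selects CLEAR_REFS_SOFT_DIRTY: reset soft-dirty for every VMA */
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "4", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
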
1631 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1648 pm->buffer[pm->pos++] = *pme; in add_to_pagemap()
1649 if (pm->pos >= pm->len) in add_to_pagemap()
1657 struct pagemapread *pm = walk->private; in pagemap_pte_hole()
1662 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole()
1664 /* End of address space hole, which we mark as non-present. */ in pagemap_pte_hole()
1668 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1682 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1684 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1698 struct page *page = NULL; in pte_to_pagemap_entry() local
1702 if (pm->show_pfn) in pte_to_pagemap_entry()
1705 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1717 if (pm->show_pfn) { in pte_to_pagemap_entry()
1732 page = pfn_swap_entry_to_page(entry); in pte_to_pagemap_entry()
1737 if (page) { in pte_to_pagemap_entry()
1738 folio = page_folio(page); in pte_to_pagemap_entry()
1742 folio_precise_page_mapcount(folio, page) == 1) in pte_to_pagemap_entry()
1745 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1754 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range()
1755 struct pagemapread *pm = walk->private; in pagemap_pmd_range()
1766 struct page *page = NULL; in pagemap_pmd_range() local
1769 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
1773 page = pmd_page(pmd); in pagemap_pmd_range()
1780 if (pm->show_pfn) in pagemap_pmd_range()
1788 if (pm->show_pfn) { in pagemap_pmd_range()
1802 page = pfn_swap_entry_to_page(entry); in pagemap_pmd_range()
1806 if (page) { in pagemap_pmd_range()
1807 folio = page_folio(page); in pagemap_pmd_range()
1817 folio_precise_page_mapcount(folio, page + idx) == 1) in pagemap_pmd_range()
1824 if (pm->show_pfn) { in pagemap_pmd_range()
1838 * goes beyond vma->vm_end. in pagemap_pmd_range()
1840 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); in pagemap_pmd_range()
1842 walk->action = ACTION_AGAIN; in pagemap_pmd_range()
1866 struct pagemapread *pm = walk->private; in pagemap_hugetlb_range()
1867 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range()
1872 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
1875 pte = huge_ptep_get(walk->mm, addr, ptep); in pagemap_hugetlb_range()
1890 if (pm->show_pfn) in pagemap_hugetlb_range()
1903 if (pm->show_pfn && (flags & PM_PRESENT)) in pagemap_hugetlb_range()
1923 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1925 * For each page in the address space, this file contains one 64-bit entry
1928 * Bits 0-54 page frame number (PFN) if present
1929 * Bits 0-4 swap type if swapped
1930 * Bits 5-54 swap offset if swapped
1931 * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1932 * Bit 56 page exclusively mapped
1933 * Bit 57 pte is uffd-wp write-protected
1934 * Bits 58-60 zero
1935 * Bit 61 page is file-page or shared-anon
1936 * Bit 62 page swapped
1937 * Bit 63 page present
1939 * If the page is not present but in swap, then the PFN contains an
1940 * encoding of the swap file number and the page's offset into the
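
A user-space sketch that decodes one entry according to the bit layout documented above; it looks up the page backing a local variable of the calling process. Note the PFN field reads back as zero without CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	int some_data = 42;			/* any mapped address will do */
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;

	/* one 64-bit entry per virtual page, indexed by virtual page number */
	off_t off = (off_t)((uintptr_t)&some_data / page_size) * sizeof(entry);

	if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry)) {
		close(fd);
		return 1;
	}
	close(fd);

	printf("present=%llu swapped=%llu soft-dirty=%llu pfn=0x%llx\n",
	       (unsigned long long)((entry >> 63) & 1),
	       (unsigned long long)((entry >> 62) & 1),
	       (unsigned long long)((entry >> 55) & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}
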
1952 struct mm_struct *mm = file->private_data; in pagemap_read()
1963 ret = -EINVAL; in pagemap_read()
1977 ret = -ENOMEM; in pagemap_read()
1983 end_vaddr = mm->task_size; in pagemap_read()
1997 if (end >= start_vaddr && end < mm->task_size) in pagemap_read()
2002 if (start_vaddr > mm->task_size) in pagemap_read()
2024 ret = -EFAULT; in pagemap_read()
2029 count -= len; in pagemap_read()
2050 file->private_data = mm; in pagemap_open()
2056 struct mm_struct *mm = file->private_data; in pagemap_release()
2084 struct page *page; in pagemap_page_category() local
2090 if (p->masks_of_interest & PAGE_IS_FILE) { in pagemap_page_category()
2091 page = vm_normal_page(vma, addr, pte); in pagemap_page_category()
2092 if (page && !PageAnon(page)) in pagemap_page_category()
2107 if (p->masks_of_interest & PAGE_IS_FILE) { in pagemap_page_category()
2131 set_pte_at(vma->vm_mm, addr, pte, ptent); in make_uffd_wp_pte()
2133 set_pte_at(vma->vm_mm, addr, pte, in make_uffd_wp_pte()
2146 struct page *page; in pagemap_thp_category() local
2152 if (p->masks_of_interest & PAGE_IS_FILE) { in pagemap_thp_category()
2153 page = vm_normal_page_pmd(vma, addr, pmd); in pagemap_thp_category()
2154 if (page && !PageAnon(page)) in pagemap_thp_category()
2171 if (p->masks_of_interest & PAGE_IS_FILE) { in pagemap_thp_category()
2190 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in make_uffd_wp_pmd()
2193 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in make_uffd_wp_pmd()
2204 * According to pagemap_hugetlb_range(), file-backed HugeTLB in pagemap_hugetlb_category()
2205 * page cannot be swapped. So PAGE_IS_FILE is not checked for in pagemap_hugetlb_category()
2241 set_huge_pte_at(vma->vm_mm, addr, ptep, in make_uffd_wp_huge_pte()
2247 set_huge_pte_at(vma->vm_mm, addr, ptep, in make_uffd_wp_huge_pte()
2256 struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; in pagemap_scan_backout_range()
2258 if (cur_buf->start != addr) in pagemap_scan_backout_range()
2259 cur_buf->end = addr; in pagemap_scan_backout_range()
2261 cur_buf->start = cur_buf->end = 0; in pagemap_scan_backout_range()
2263 p->found_pages -= (end - addr) / PAGE_SIZE; in pagemap_scan_backout_range()
2270 categories ^= p->arg.category_inverted; in pagemap_scan_is_interesting_page()
2271 if ((categories & p->arg.category_mask) != p->arg.category_mask) in pagemap_scan_is_interesting_page()
2273 if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask)) in pagemap_scan_is_interesting_page()
2282 unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED; in pagemap_scan_is_interesting_vma()
2284 categories ^= p->arg.category_inverted; in pagemap_scan_is_interesting_vma()
2294 struct pagemap_scan_private *p = walk->private; in pagemap_scan_test_walk()
2295 struct vm_area_struct *vma = walk->vma; in pagemap_scan_test_walk()
2301 /* User requested explicit failure over wp-async capability */ in pagemap_scan_test_walk()
2302 if (p->arg.flags & PM_SCAN_CHECK_WPASYNC) in pagemap_scan_test_walk()
2303 return -EPERM; in pagemap_scan_test_walk()
2305 * User requires wr-protect, and allows silently skipping in pagemap_scan_test_walk()
2308 if (p->arg.flags & PM_SCAN_WP_MATCHING) in pagemap_scan_test_walk()
2311 * Then the request doesn't involve wr-protects at all, in pagemap_scan_test_walk()
2316 if (vma->vm_flags & VM_PFNMAP) in pagemap_scan_test_walk()
2322 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_scan_test_walk()
2328 p->cur_vma_category = vma_category; in pagemap_scan_test_walk()
2337 struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; in pagemap_scan_push_range()
2341 * won't match here. There is no other way for `cur_buf->end` to be in pagemap_scan_push_range()
2342 * non-zero other than it being non-empty. in pagemap_scan_push_range()
2344 if (addr == cur_buf->end && categories == cur_buf->categories) { in pagemap_scan_push_range()
2345 cur_buf->end = end; in pagemap_scan_push_range()
2349 if (cur_buf->end) { in pagemap_scan_push_range()
2350 if (p->vec_buf_index >= p->vec_buf_len - 1) in pagemap_scan_push_range()
2353 cur_buf = &p->vec_buf[++p->vec_buf_index]; in pagemap_scan_push_range()
2356 cur_buf->start = addr; in pagemap_scan_push_range()
2357 cur_buf->end = end; in pagemap_scan_push_range()
2358 cur_buf->categories = categories; in pagemap_scan_push_range()
2370 if (!p->vec_buf) in pagemap_scan_output()
2373 categories &= p->arg.return_mask; in pagemap_scan_output()
2375 n_pages = (*end - addr) / PAGE_SIZE; in pagemap_scan_output()
2376 if (check_add_overflow(p->found_pages, n_pages, &total_pages) || in pagemap_scan_output()
2377 total_pages > p->arg.max_pages) { in pagemap_scan_output()
2378 size_t n_too_much = total_pages - p->arg.max_pages; in pagemap_scan_output()
2379 *end -= n_too_much * PAGE_SIZE; in pagemap_scan_output()
2380 n_pages -= n_too_much; in pagemap_scan_output()
2381 ret = -ENOSPC; in pagemap_scan_output()
2387 ret = -ENOSPC; in pagemap_scan_output()
2390 p->found_pages += n_pages; in pagemap_scan_output()
2392 p->arg.walk_end = *end; in pagemap_scan_output()
2401 struct pagemap_scan_private *p = walk->private; in pagemap_scan_thp_entry()
2402 struct vm_area_struct *vma = walk->vma; in pagemap_scan_thp_entry()
2409 return -ENOENT; in pagemap_scan_thp_entry()
2411 categories = p->cur_vma_category | in pagemap_scan_thp_entry()
2421 if (~p->arg.flags & PM_SCAN_WP_MATCHING) in pagemap_scan_thp_entry()
2427 * Break huge page into small pages if the WP operation in pagemap_scan_thp_entry()
2428 * needs to be performed on a portion of the huge page. in pagemap_scan_thp_entry()
2435 return -ENOENT; in pagemap_scan_thp_entry()
2444 return -ENOENT; in pagemap_scan_thp_entry()
2451 struct pagemap_scan_private *p = walk->private; in pagemap_scan_pmd_entry()
2452 struct vm_area_struct *vma = walk->vma; in pagemap_scan_pmd_entry()
2461 if (ret != -ENOENT) { in pagemap_scan_pmd_entry()
2467 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in pagemap_scan_pmd_entry()
2470 walk->action = ACTION_AGAIN; in pagemap_scan_pmd_entry()
2474 if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { in pagemap_scan_pmd_entry()
2490 if (!p->arg.category_anyof_mask && !p->arg.category_inverted && in pagemap_scan_pmd_entry()
2491 p->arg.category_mask == PAGE_IS_WRITTEN && in pagemap_scan_pmd_entry()
2492 p->arg.return_mask == PAGE_IS_WRITTEN) { in pagemap_scan_pmd_entry()
2500 ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN, in pagemap_scan_pmd_entry()
2504 if (~p->arg.flags & PM_SCAN_WP_MATCHING) in pagemap_scan_pmd_entry()
2516 unsigned long categories = p->cur_vma_category | in pagemap_scan_pmd_entry()
2527 if (~p->arg.flags & PM_SCAN_WP_MATCHING) in pagemap_scan_pmd_entry()
2554 struct pagemap_scan_private *p = walk->private; in pagemap_scan_hugetlb_entry()
2555 struct vm_area_struct *vma = walk->vma; in pagemap_scan_hugetlb_entry()
2561 if (~p->arg.flags & PM_SCAN_WP_MATCHING) { in pagemap_scan_hugetlb_entry()
2562 /* Go the short route when not write-protecting pages. */ in pagemap_scan_hugetlb_entry()
2564 pte = huge_ptep_get(walk->mm, start, ptep); in pagemap_scan_hugetlb_entry()
2565 categories = p->cur_vma_category | pagemap_hugetlb_category(pte); in pagemap_scan_hugetlb_entry()
2573 i_mmap_lock_write(vma->vm_file->f_mapping); in pagemap_scan_hugetlb_entry()
2574 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep); in pagemap_scan_hugetlb_entry()
2576 pte = huge_ptep_get(walk->mm, start, ptep); in pagemap_scan_hugetlb_entry()
2577 categories = p->cur_vma_category | pagemap_hugetlb_category(pte); in pagemap_scan_hugetlb_entry()
2590 /* Partial HugeTLB page WP isn't possible. */ in pagemap_scan_hugetlb_entry()
2592 p->arg.walk_end = start; in pagemap_scan_hugetlb_entry()
2602 i_mmap_unlock_write(vma->vm_file->f_mapping); in pagemap_scan_hugetlb_entry()
2613 struct pagemap_scan_private *p = walk->private; in pagemap_scan_pte_hole()
2614 struct vm_area_struct *vma = walk->vma; in pagemap_scan_pte_hole()
2617 if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p)) in pagemap_scan_pte_hole()
2620 ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end); in pagemap_scan_pte_hole()
2624 if (~p->arg.flags & PM_SCAN_WP_MATCHING) in pagemap_scan_pte_hole()
2627 err = uffd_wp_range(vma, addr, end - addr, true); in pagemap_scan_pte_hole()
2645 return -EFAULT; in pagemap_scan_get_args()
2647 if (arg->size != sizeof(struct pm_scan_arg)) in pagemap_scan_get_args()
2648 return -EINVAL; in pagemap_scan_get_args()
2651 if (arg->flags & ~PM_SCAN_FLAGS) in pagemap_scan_get_args()
2652 return -EINVAL; in pagemap_scan_get_args()
2653 if ((arg->category_inverted | arg->category_mask | in pagemap_scan_get_args()
2654 arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES) in pagemap_scan_get_args()
2655 return -EINVAL; in pagemap_scan_get_args()
2657 arg->start = untagged_addr((unsigned long)arg->start); in pagemap_scan_get_args()
2658 arg->end = untagged_addr((unsigned long)arg->end); in pagemap_scan_get_args()
2659 arg->vec = untagged_addr((unsigned long)arg->vec); in pagemap_scan_get_args()
2662 if (!IS_ALIGNED(arg->start, PAGE_SIZE)) in pagemap_scan_get_args()
2663 return -EINVAL; in pagemap_scan_get_args()
2664 if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start)) in pagemap_scan_get_args()
2665 return -EFAULT; in pagemap_scan_get_args()
2666 if (!arg->vec && arg->vec_len) in pagemap_scan_get_args()
2667 return -EINVAL; in pagemap_scan_get_args()
2668 if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX) in pagemap_scan_get_args()
2669 return -EINVAL; in pagemap_scan_get_args()
2670 if (arg->vec && !access_ok((void __user *)(long)arg->vec, in pagemap_scan_get_args()
2671 size_mul(arg->vec_len, sizeof(struct page_region)))) in pagemap_scan_get_args()
2672 return -EFAULT; in pagemap_scan_get_args()
2675 arg->end = ALIGN(arg->end, PAGE_SIZE); in pagemap_scan_get_args()
2676 arg->walk_end = 0; in pagemap_scan_get_args()
2677 if (!arg->max_pages) in pagemap_scan_get_args()
2678 arg->max_pages = ULONG_MAX; in pagemap_scan_get_args()
2688 if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end))) in pagemap_scan_writeback_args()
2689 return -EFAULT; in pagemap_scan_writeback_args()
2696 if (!p->arg.vec_len) in pagemap_scan_init_bounce_buffer()
2699 p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT, in pagemap_scan_init_bounce_buffer()
2700 p->arg.vec_len); in pagemap_scan_init_bounce_buffer()
2701 p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf), in pagemap_scan_init_bounce_buffer()
2703 if (!p->vec_buf) in pagemap_scan_init_bounce_buffer()
2704 return -ENOMEM; in pagemap_scan_init_bounce_buffer()
2706 p->vec_buf->start = p->vec_buf->end = 0; in pagemap_scan_init_bounce_buffer()
2707 p->vec_out = (struct page_region __user *)(long)p->arg.vec; in pagemap_scan_init_bounce_buffer()
2714 const struct page_region *buf = p->vec_buf; in pagemap_scan_flush_buffer()
2715 long n = p->vec_buf_index; in pagemap_scan_flush_buffer()
2717 if (!p->vec_buf) in pagemap_scan_flush_buffer()
2726 if (copy_to_user(p->vec_out, buf, n * sizeof(*buf))) in pagemap_scan_flush_buffer()
2727 return -EFAULT; in pagemap_scan_flush_buffer()
2729 p->arg.vec_len -= n; in pagemap_scan_flush_buffer()
2730 p->vec_out += n; in pagemap_scan_flush_buffer()
2732 p->vec_buf_index = 0; in pagemap_scan_flush_buffer()
2733 p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len); in pagemap_scan_flush_buffer()
2734 p->vec_buf->start = p->vec_buf->end = 0; in pagemap_scan_flush_buffer()
2762 ret = -EINTR; in do_pagemap_scan()
2791 if (ret != -ENOSPC) in do_pagemap_scan()
2799 if (!ret || ret == -ENOSPC) in do_pagemap_scan()
2806 ret = -EFAULT; in do_pagemap_scan()
2815 struct mm_struct *mm = file->private_data; in do_pagemap_cmd()
2822 return -EINVAL; in do_pagemap_cmd()
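
The fragments above validate and drive the PAGEMAP_SCAN ioctl on /proc/pid/pagemap. A hedged user-space sketch of one call, assuming struct pm_scan_arg, struct page_region, PAGEMAP_SCAN and PAGE_IS_WRITTEN are provided by the uapi <linux/fs.h>, and that the ioctl returns the number of page_region entries it filled:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *buf = aligned_alloc(psz, psz);	/* one page-aligned page */
	struct page_region vec[8];
	struct pm_scan_arg arg = {
		.size        = sizeof(arg),	/* checked against sizeof(struct pm_scan_arg) */
		.start       = (uintptr_t)buf,	/* must be PAGE_SIZE aligned */
		.end         = (uintptr_t)buf + psz,
		.vec         = (uintptr_t)vec,
		.vec_len     = 8,
		.max_pages   = 0,		/* 0 means no limit, per the code above */
		.return_mask = PAGE_IS_WRITTEN,	/* report only this category bit */
	};
	int fd, n, i;

	memset(buf, 0xaa, psz);			/* fault the page in */

	fd = open("/proc/self/pagemap", O_RDONLY);
	n = ioctl(fd, PAGEMAP_SCAN, &arg);	/* assumed: returns regions filled */
	if (n < 0)
		perror("PAGEMAP_SCAN");
	for (i = 0; i < n; i++)
		printf("%llx-%llx categories=%llx\n",
		       (unsigned long long)vec[i].start,
		       (unsigned long long)vec[i].end,
		       (unsigned long long)vec[i].categories);
	close(fd);
	free(buf);
	return 0;
}
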
2854 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, in gather_stats() argument
2857 struct folio *folio = page_folio(page); in gather_stats()
2858 int count = folio_precise_page_mapcount(folio, page); in gather_stats()
2860 md->pages += nr_pages; in gather_stats()
2862 md->dirty += nr_pages; in gather_stats()
2865 md->swapcache += nr_pages; in gather_stats()
2868 md->active += nr_pages; in gather_stats()
2871 md->writeback += nr_pages; in gather_stats()
2874 md->anon += nr_pages; in gather_stats()
2876 if (count > md->mapcount_max) in gather_stats()
2877 md->mapcount_max = count; in gather_stats()
2879 md->node[folio_nid(folio)] += nr_pages; in gather_stats()
2882 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats()
2885 struct page *page; in can_gather_numa_stats() local
2891 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
2892 if (!page || is_zone_device_page(page)) in can_gather_numa_stats()
2895 if (PageReserved(page)) in can_gather_numa_stats()
2898 nid = page_to_nid(page); in can_gather_numa_stats()
2902 return page; in can_gather_numa_stats()
2906 static struct page *can_gather_numa_stats_pmd(pmd_t pmd, in can_gather_numa_stats_pmd()
2910 struct page *page; in can_gather_numa_stats_pmd() local
2916 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
2917 if (!page) in can_gather_numa_stats_pmd()
2920 if (PageReserved(page)) in can_gather_numa_stats_pmd()
2923 nid = page_to_nid(page); in can_gather_numa_stats_pmd()
2927 return page; in can_gather_numa_stats_pmd()
2934 struct numa_maps *md = walk->private; in gather_pte_stats()
2935 struct vm_area_struct *vma = walk->vma; in gather_pte_stats()
2943 struct page *page; in gather_pte_stats() local
2945 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
2946 if (page) in gather_pte_stats()
2947 gather_stats(page, md, pmd_dirty(*pmd), in gather_pte_stats()
2953 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
2955 walk->action = ACTION_AGAIN; in gather_pte_stats()
2960 struct page *page = can_gather_numa_stats(ptent, vma, addr); in gather_pte_stats() local
2961 if (!page) in gather_pte_stats()
2963 gather_stats(page, md, pte_dirty(ptent), 1); in gather_pte_stats()
2974 pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte); in gather_hugetlb_stats()
2976 struct page *page; in gather_hugetlb_stats() local
2981 page = pte_page(huge_pte); in gather_hugetlb_stats()
2983 md = walk->private; in gather_hugetlb_stats()
2984 gather_stats(page, md, pte_dirty(huge_pte), 1); in gather_hugetlb_stats()
3007 struct numa_maps_private *numa_priv = m->private; in show_numa_map()
3008 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; in show_numa_map()
3010 struct numa_maps *md = &numa_priv->md; in show_numa_map()
3011 struct file *file = vma->vm_file; in show_numa_map()
3012 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
3024 pol = __get_vma_policy(vma, vma->vm_start, &ilx); in show_numa_map()
3029 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); in show_numa_map()
3032 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
3049 if (!md->pages) in show_numa_map()
3052 if (md->anon) in show_numa_map()
3053 seq_printf(m, " anon=%lu", md->anon); in show_numa_map()
3055 if (md->dirty) in show_numa_map()
3056 seq_printf(m, " dirty=%lu", md->dirty); in show_numa_map()
3058 if (md->pages != md->anon && md->pages != md->dirty) in show_numa_map()
3059 seq_printf(m, " mapped=%lu", md->pages); in show_numa_map()
3061 if (md->mapcount_max > 1) in show_numa_map()
3062 seq_printf(m, " mapmax=%lu", md->mapcount_max); in show_numa_map()
3064 if (md->swapcache) in show_numa_map()
3065 seq_printf(m, " swapcache=%lu", md->swapcache); in show_numa_map()
3067 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
3068 seq_printf(m, " active=%lu", md->active); in show_numa_map()
3070 if (md->writeback) in show_numa_map()
3071 seq_printf(m, " writeback=%lu", md->writeback); in show_numa_map()
3074 if (md->node[nid]) in show_numa_map()
3075 seq_printf(m, " N%d=%lu", nid, md->node[nid]); in show_numa_map()