/linux-6.12.1/arch/powerpc/mm/book3s64/

radix_hugetlbpage.c
    16: radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);    in radix__flush_hugetlb_page()
    25: radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);    in radix__local_flush_hugetlb_page()
    39: radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);    in radix__flush_hugetlb_tlb_range()
    41: radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);    in radix__flush_hugetlb_tlb_range()
    42: mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);    in radix__flush_hugetlb_tlb_range()
    49: struct mm_struct *mm = vma->vm_mm;    in radix__huge_ptep_modify_prot_commit()
    62: set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);    in radix__huge_ptep_modify_prot_commit()

pgtable.c
    54: assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));    in pmdp_set_access_flags()
    74: assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));    in pudp_set_access_flags()
    92: return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);    in pmdp_test_and_clear_young()
    98: return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);    in pudp_test_and_clear_young()
    174: old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);    in pmdp_invalidate()
    185: old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID);    in pudp_invalidate()
    197: pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);    in pmdp_huge_get_and_clear_full()
    216: pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);    in pudp_huge_get_and_clear_full()
    539: pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);    in ptep_modify_prot_start()
    551: set_pte_at(vma->vm_mm, addr, ptep, pte);    in ptep_modify_prot_commit()

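All of the book3s64 helpers above follow one shape: they accept a VMA, but the underlying page-table primitives are keyed by an mm_struct, so the helper simply passes vma->vm_mm down. A minimal userspace model of that shape (all types and the _model names are stand-ins, not kernel code):

    #include <stdio.h>

    /* Stand-in types; the real kernel structures are far richer. */
    struct mm_struct { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };
    typedef unsigned long pte_t;

    /* mm-keyed primitive, modelling set_pte_at() */
    static void set_pte_at_model(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t pte)
    {
        *ptep = pte;
        printf("mm %d: pte at %#lx set to %#lx\n", mm->id, addr, pte);
    }

    /* vma-based helper, modelling ptep_modify_prot_commit(): all it
     * needs from the VMA is the owning mm. */
    static void ptep_modify_prot_commit_model(struct vm_area_struct *vma,
                                              unsigned long addr,
                                              pte_t *ptep, pte_t pte)
    {
        set_pte_at_model(vma->vm_mm, addr, ptep, pte);
    }

    int main(void)
    {
        struct mm_struct mm = { .id = 1 };
        struct vm_area_struct vma = { .vm_mm = &mm };
        pte_t pte = 0;

        ptep_modify_prot_commit_model(&vma, 0x1000, &pte, (pte_t)0x2b);
        return 0;
    }
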
/linux-6.12.1/arch/mips/mm/

tlb-r3k.c
    71: struct mm_struct *mm = vma->vm_mm;    in local_flush_tlb_range()
    152: if (cpu_context(cpu, vma->vm_mm) != 0) {    in local_flush_tlb_page()
    157: printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);    in local_flush_tlb_page()
    159: newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;    in local_flush_tlb_page()
    188: if (current->active_mm != vma->vm_mm)    in __update_tlb()
    194: if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {    in __update_tlb()
    196: (cpu_context(cpu, vma->vm_mm)), pid);    in __update_tlb()

tlb-r4k.c
    109: struct mm_struct *mm = vma->vm_mm;    in local_flush_tlb_range()
    215: if (cpu_context(cpu, vma->vm_mm) != 0) {    in local_flush_tlb_page()
    227: write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));    in local_flush_tlb_page()
    229: write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));    in local_flush_tlb_page()
    306: if (current->active_mm != vma->vm_mm)    in __update_tlb()
    319: pgdp = pgd_offset(vma->vm_mm, address);    in __update_tlb()

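In both MIPS TLB flavors, local_flush_tlb_page() is guarded by cpu_context(cpu, vma->vm_mm): a per-CPU context of 0 means that mm never ran on this CPU, so its TLB cannot hold stale entries and the flush can be skipped. A compilable sketch of that guard (stand-in types, hypothetical _model names):

    #include <stdio.h>

    #define NR_CPUS   4
    #define ASID_MASK 0xffUL

    struct mm_struct { unsigned long context[NR_CPUS]; };  /* per-CPU ASID */
    struct vm_area_struct { struct mm_struct *vm_mm; };

    static unsigned long cpu_context(int cpu, const struct mm_struct *mm)
    {
        return mm->context[cpu];
    }

    static void local_flush_tlb_page_model(int cpu, struct vm_area_struct *vma,
                                           unsigned long page)
    {
        /* A context of 0 means vma->vm_mm never ran on this CPU, so the
         * TLB cannot hold any of its entries: skip the flush entirely. */
        if (cpu_context(cpu, vma->vm_mm) != 0) {
            unsigned long asid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;

            printf("cpu %d: invalidate page %#lx for asid %#lx\n",
                   cpu, page, asid);
        }
    }

    int main(void)
    {
        struct mm_struct mm = { .context = { 0, 0x11, 0, 0 } };
        struct vm_area_struct vma = { .vm_mm = &mm };

        local_flush_tlb_page_model(1, &vma, 0x2000);  /* flushes: cpu 1 ran mm */
        local_flush_tlb_page_model(0, &vma, 0x2000);  /* no-op: context is 0 */
        return 0;
    }
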
/linux-6.12.1/arch/sh/mm/

tlbflush_32.c
    19: if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {    in local_flush_tlb_page()
    24: asid = cpu_asid(cpu, vma->vm_mm);    in local_flush_tlb_page()
    28: if (vma->vm_mm != current->mm) {    in local_flush_tlb_page()
    42: struct mm_struct *mm = vma->vm_mm;    in local_flush_tlb_range()

/linux-6.12.1/arch/riscv/mm/

tlbflush.c
    131: __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),    in flush_tlb_page()
    164: __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),    in flush_tlb_range()
    178: __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),    in flush_pmd_tlb_range()

pgtable.c
    16: __set_pte_at(vma->vm_mm, ptep, entry);    in ptep_set_access_flags()
    25: __set_pte_at(vma->vm_mm, ptep, entry);    in ptep_set_access_flags()
    142: pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);    in pmdp_collapse_flush()
    154: flush_tlb_mm(vma->vm_mm);    in pmdp_collapse_flush()

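The riscv pmdp_collapse_flush() hits show the order of operations: clear the huge PMD through vma->vm_mm first, then invalidate, and riscv conservatively flushes the whole mm rather than a single address. A toy model of that sequence (stand-in types, hypothetical _model names):

    #include <stdio.h>

    struct mm_struct { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };
    typedef unsigned long pmd_t;

    static pmd_t pmdp_huge_get_and_clear_model(struct mm_struct *mm,
                                               unsigned long addr, pmd_t *pmdp)
    {
        pmd_t old = *pmdp;

        *pmdp = 0;
        printf("mm %d: cleared huge pmd at %#lx\n", mm->id, addr);
        return old;
    }

    static void flush_tlb_mm_model(struct mm_struct *mm)
    {
        printf("mm %d: flush the whole TLB context\n", mm->id);
    }

    /* Clear first, flush second, so no CPU can refill its TLB from a
     * still-present huge mapping while the collapse is in progress. */
    static pmd_t pmdp_collapse_flush_model(struct vm_area_struct *vma,
                                           unsigned long address, pmd_t *pmdp)
    {
        pmd_t pmd = pmdp_huge_get_and_clear_model(vma->vm_mm, address, pmdp);

        flush_tlb_mm_model(vma->vm_mm);
        return pmd;
    }

    int main(void)
    {
        struct mm_struct mm = { .id = 2 };
        struct vm_area_struct vma = { .vm_mm = &mm };
        pmd_t pmd = 0x200000;

        pmdp_collapse_flush_model(&vma, 0x200000, &pmd);
        return 0;
    }
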
/linux-6.12.1/Documentation/translations/zh_CN/core-api/

cachetlb.rst  (zh_CN translation; hit lines rendered in English below)
    58: This interface must ensure that any previous page table modifications for the address space 'vma->vm_mm' in the range 'start' to 'end-1' ...
    71: ... the supporting structures that track the mmap regions of a process; the address space can be obtained via vma->vm_mm. In addition ...
    76: ... page table modifications to 'vma->vm_mm' are visible to the CPU. That is, once this has run, the TLB ...
    77: ... will hold no entry for 'vma->vm_mm' at virtual address 'addr'.
    85: ... in the software page tables, at virtual address 'address' in address space 'vma->vm_mm', there now exists ...
    141: ... afterwards, in the caches, for virtual addresses in the range 'start' to 'end-1' of 'vma->vm_mm' ...
    154: ... can be obtained via vma->vm_mm. In addition, one can test (vma->vm_flags & ...
    161: ... once this has run, for 'vma->vm_mm' at virtual address 'addr', the caches will not ...

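The translated text above states the contract of flush_tlb_page(): once it returns, the TLB holds no entry for 'vma->vm_mm' at virtual address 'addr'. A toy model that enforces that postcondition over a software "TLB" (everything here is a hypothetical stand-in):

    #include <stdio.h>

    #define TLB_ENTRIES 8
    #define PAGE_SHIFT  12

    struct mm_struct { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };

    /* Toy TLB: each slot caches a (mm, virtual page) tag. */
    static struct { struct mm_struct *mm; unsigned long vpage; } tlb[TLB_ENTRIES];

    /* Postcondition from the text: after this returns, no entry for
     * (vma->vm_mm, addr) remains in the TLB. */
    static void flush_tlb_page_model(struct vm_area_struct *vma, unsigned long addr)
    {
        for (int i = 0; i < TLB_ENTRIES; i++)
            if (tlb[i].mm == vma->vm_mm && tlb[i].vpage == (addr >> PAGE_SHIFT))
                tlb[i].mm = NULL;   /* invalidate the matching slot */
    }

    int main(void)
    {
        struct mm_struct mm = { .id = 3 };
        struct vm_area_struct vma = { .vm_mm = &mm };

        tlb[0].mm = &mm;
        tlb[0].vpage = 0x1234;
        flush_tlb_page_model(&vma, 0x1234UL << PAGE_SHIFT);
        printf("slot 0 still valid: %d\n", tlb[0].mm != NULL);  /* prints 0 */
        return 0;
    }
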
/linux-6.12.1/arch/arc/mm/

tlb.c
    222: local_flush_tlb_mm(vma->vm_mm);    in local_flush_tlb_range()
    235: if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {    in local_flush_tlb_range()
    237: tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));    in local_flush_tlb_range()
    288: if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {    in local_flush_tlb_page()
    289: tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));    in local_flush_tlb_page()
    351: on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);    in flush_tlb_page()
    363: on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);    in flush_tlb_range()
    376: on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);    in flush_pmd_tlb_range()
    425: if (current->active_mm != vma->vm_mm)    in create_tlb()
    547: if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {    in local_flush_pmd_tlb_range()
    [all …]

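The ARC flush_tlb_page()/flush_tlb_range() hits broadcast the flush with on_each_cpu_mask(mm_cpumask(vma->vm_mm), ...): only CPUs recorded in the mm's cpumask ever ran the mm, so only they need the IPI. A userspace sketch of that filtering (direct calls stand in for IPIs; all names hypothetical):

    #include <stdio.h>

    #define NR_CPUS 4

    struct mm_struct { unsigned long cpu_bitmap; };  /* CPUs that ran this mm */
    struct vm_area_struct { struct mm_struct *vm_mm; };

    struct tlb_args { struct vm_area_struct *ta_vma; unsigned long ta_start; };

    static void ipi_flush_tlb_page_model(int cpu, void *arg)
    {
        struct tlb_args *ta = arg;

        printf("cpu %d: flush page %#lx\n", cpu, ta->ta_start);
    }

    /* Model of on_each_cpu_mask(): run fn only on CPUs whose bit is set.
     * In the kernel this is an IPI; here it is a direct call. */
    static void on_each_cpu_mask_model(unsigned long mask,
                                       void (*fn)(int, void *), void *arg)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (mask & (1UL << cpu))
                fn(cpu, arg);
    }

    static void flush_tlb_page_model(struct vm_area_struct *vma, unsigned long addr)
    {
        struct tlb_args ta = { .ta_vma = vma, .ta_start = addr };

        on_each_cpu_mask_model(vma->vm_mm->cpu_bitmap,
                               ipi_flush_tlb_page_model, &ta);
    }

    int main(void)
    {
        struct mm_struct mm = { .cpu_bitmap = 0x5 };  /* CPUs 0 and 2 */
        struct vm_area_struct vma = { .vm_mm = &mm };

        flush_tlb_page_model(&vma, 0x4000);           /* runs on cpu 0 and 2 */
        return 0;
    }
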
/linux-6.12.1/arch/csky/kernel/

vdso.c
    105: if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))    in arch_vma_name()
    107: if (vma->vm_mm && (vma->vm_start ==    in arch_vma_name()
    108: (long)vma->vm_mm->context.vdso + PAGE_SIZE))    in arch_vma_name()

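arch_vma_name() here identifies the vDSO mapping by comparing vma->vm_start against the per-mm bookkeeping reachable through vma->vm_mm->context.vdso. A compilable sketch (stand-in types; the name returned for the second page is an assumption):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct mm_context { unsigned long vdso; };
    struct mm_struct { struct mm_context context; };
    struct vm_area_struct { struct mm_struct *vm_mm; unsigned long vm_start; };

    static const char *arch_vma_name_model(struct vm_area_struct *vma)
    {
        /* The vma->vm_mm check guards against VMAs with no owning mm. */
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
            return "[vdso]";
        if (vma->vm_mm &&
            vma->vm_start == vma->vm_mm->context.vdso + PAGE_SIZE)
            return "[vdso_data]";   /* second-page name is an assumption */
        return NULL;
    }

    int main(void)
    {
        struct mm_struct mm = { .context = { .vdso = 0x70000000UL } };
        struct vm_area_struct vma = { .vm_mm = &mm, .vm_start = 0x70000000UL };

        printf("%s\n", arch_vma_name_model(&vma));    /* prints [vdso] */
        return 0;
    }
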
/linux-6.12.1/arch/arm/mm/

fault-armv.c
    57: set_pte_at(vma->vm_mm, address, ptep, entry);    in do_adjust_pte()
    99: pgd = pgd_offset(vma->vm_mm, address);    in adjust_pte()
    120: pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);    in adjust_pte()
    138: struct mm_struct *mm = vma->vm_mm;    in make_coherent()
    158: if (mpnt->vm_mm != mm || mpnt == vma)    in make_coherent()

/linux-6.12.1/mm/

mremap.c
    141: struct mm_struct *mm = vma->vm_mm;    in move_ptes()
    186: flush_tlb_batched_pending(vma->vm_mm);    in move_ptes()
    240: struct mm_struct *mm = vma->vm_mm;    in move_normal_pmd()
    276: old_ptl = pmd_lock(vma->vm_mm, old_pmd);    in move_normal_pmd()
    315: struct mm_struct *mm = vma->vm_mm;    in move_normal_pud()
    331: old_ptl = pud_lock(vma->vm_mm, old_pud);    in move_normal_pud()
    364: struct mm_struct *mm = vma->vm_mm;    in move_huge_pud()
    378: old_ptl = pud_lock(vma->vm_mm, old_pud);    in move_huge_pud()
    526: return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;    in can_align_down()
    579: mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,    in move_page_tables()
    [all …]

memory.c
    500: pgd_t *pgd = pgd_offset(vma->vm_mm, addr);    in print_bad_pte()
    756: set_pte_at(vma->vm_mm, address, ptep, pte);    in restore_exclusive_pte()
    945: set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);    in copy_present_page()
    953: struct mm_struct *src_mm = src_vma->vm_mm;    in __copy_present_ptes()
    969: set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);    in __copy_present_ptes()
    1084: struct mm_struct *dst_mm = dst_vma->vm_mm;    in copy_pte_range()
    1085: struct mm_struct *src_mm = src_vma->vm_mm;    in copy_pte_range()
    1230: struct mm_struct *dst_mm = dst_vma->vm_mm;    in copy_pmd_range()
    1231: struct mm_struct *src_mm = src_vma->vm_mm;    in copy_pmd_range()
    1267: struct mm_struct *dst_mm = dst_vma->vm_mm;    in copy_pud_range()
    [all …]

pgtable-generic.c
    74: set_pte_at(vma->vm_mm, address, ptep, entry);    in ptep_set_access_flags()
    97: struct mm_struct *mm = (vma)->vm_mm;    in ptep_clear_flush()
    116: set_pmd_at(vma->vm_mm, address, pmdp, entry);    in pmdp_set_access_flags()
    144: pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);    in pmdp_huge_clear_flush()
    157: pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);    in pudp_huge_clear_flush()
    229: pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);    in pmdp_collapse_flush()

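The ptep_clear_flush()/pmdp_huge_clear_flush() hits share a generic shape: atomically get-and-clear the entry through vma->vm_mm, then flush the TLB for that address, skipping the flush when the old entry was not present. A toy model (stand-in types, hypothetical _model names):

    #include <stdio.h>

    struct mm_struct { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };
    typedef unsigned long pte_t;

    static pte_t ptep_get_and_clear_model(struct mm_struct *mm,
                                          unsigned long addr, pte_t *ptep)
    {
        pte_t old = *ptep;

        *ptep = 0;
        (void)mm; (void)addr;
        return old;
    }

    static void flush_tlb_page_model(struct vm_area_struct *vma, unsigned long addr)
    {
        printf("mm %d: flush page %#lx\n", vma->vm_mm->id, addr);
    }

    /* Clear first, flush second; the flush is skipped entirely when the
     * PTE was not present, since nothing could have been cached. */
    static pte_t ptep_clear_flush_model(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep)
    {
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte = ptep_get_and_clear_model(mm, address, ptep);

        if (pte != 0)
            flush_tlb_page_model(vma, address);
        return pte;
    }

    int main(void)
    {
        struct mm_struct mm = { .id = 4 };
        struct vm_area_struct vma = { .vm_mm = &mm };
        pte_t pte = 0x1;

        ptep_clear_flush_model(&vma, 0x3000, &pte);
        return 0;
    }
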
mprotect.c
    99: pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in change_pte_range()
    105: atomic_read(&vma->vm_mm->mm_users) == 1)    in change_pte_range()
    108: flush_tlb_batched_pending(vma->vm_mm);    in change_pte_range()
    249: pte_clear(vma->vm_mm, addr, pte);    in change_pte_range()
    263: set_pte_at(vma->vm_mm, addr, pte, newpte);    in change_pte_range()
    284: set_pte_at(vma->vm_mm, addr, pte,    in change_pte_range()
    337: if (pte_alloc(vma->vm_mm, pmd)) \
    352: low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
    457: vma->vm_mm, addr, end);    in change_pud_range()
    515: struct mm_struct *mm = vma->vm_mm;    in change_protection_range()
    [all …]

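change_pte_range() maps and locks the PTE page through vma->vm_mm (pte_offset_map_lock), walks the entries one PAGE_SIZE step at a time, then unlocks. A userspace model of that lock-walk-unlock shape, with a pthread mutex standing in for the page-table spinlock (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    #define PAGE_SIZE    4096UL
    #define PTRS_PER_PTE 512

    struct mm_struct { pthread_mutex_t page_table_lock; };
    struct vm_area_struct { struct mm_struct *vm_mm; };
    typedef unsigned long pte_t;

    static pte_t page_table[PTRS_PER_PTE];

    /* Model of pte_offset_map_lock(): take the lock, return the entry. */
    static pte_t *pte_offset_map_lock_model(struct mm_struct *mm,
                                            unsigned long addr,
                                            pthread_mutex_t **ptl)
    {
        *ptl = &mm->page_table_lock;
        pthread_mutex_lock(*ptl);
        return &page_table[(addr / PAGE_SIZE) % PTRS_PER_PTE];
    }

    static void change_pte_range_model(struct vm_area_struct *vma,
                                       unsigned long addr, unsigned long end)
    {
        pthread_mutex_t *ptl;
        pte_t *pte = pte_offset_map_lock_model(vma->vm_mm, addr, &ptl);

        do {
            *pte |= 0x4;   /* e.g. set a software protection bit */
        } while (pte++, addr += PAGE_SIZE, addr != end);

        pthread_mutex_unlock(ptl);
    }

    int main(void)
    {
        struct mm_struct mm = { .page_table_lock = PTHREAD_MUTEX_INITIALIZER };
        struct vm_area_struct vma = { .vm_mm = &mm };

        change_pte_range_model(&vma, 0, 4 * PAGE_SIZE);
        printf("pte[0] = %#lx\n", page_table[0]);
        return 0;
    }
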
huge_memory.c
    109: if (!vma->vm_mm) /* vdso */    in __thp_vma_allowable_orders()
    1151: if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {    in __do_huge_pmd_anonymous_page()
    1161: pgtable = pte_alloc_one(vma->vm_mm);    in __do_huge_pmd_anonymous_page()
    1175: vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);    in __do_huge_pmd_anonymous_page()
    1181: ret = check_stable_address_space(vma->vm_mm);    in __do_huge_pmd_anonymous_page()
    1189: pte_free(vma->vm_mm, pgtable);    in __do_huge_pmd_anonymous_page()
    1199: pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);    in __do_huge_pmd_anonymous_page()
    1200: set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);    in __do_huge_pmd_anonymous_page()
    1202: add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);    in __do_huge_pmd_anonymous_page()
    1203: mm_inc_nr_ptes(vma->vm_mm);    in __do_huge_pmd_anonymous_page()
    [all …]

madvise.c
    96: mmap_assert_locked(vma->vm_mm);    in anon_vma_name()
    142: struct mm_struct *mm = vma->vm_mm;    in madvise_update_vma()
    186: ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in swapin_walk_pmd_entry()
    265: struct mm_struct *mm = vma->vm_mm;    in madvise_willneed()
    272: walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);    in madvise_willneed()
    434: start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in madvise_cold_or_pageout_pte_range()
    571: walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);    in madvise_cold_page_range()
    584: struct mm_struct *mm = vma->vm_mm;    in madvise_cold()
    609: walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);    in madvise_pageout_page_range()
    617: struct mm_struct *mm = vma->vm_mm;    in madvise_pageout()
    [all …]

/linux-6.12.1/arch/s390/include/asm/

hugetlb.h
    60: return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);    in huge_ptep_clear_flush()
    67: int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);    in huge_ptep_set_access_flags()
    69: huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);    in huge_ptep_set_access_flags()
    70: __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);    in huge_ptep_set_access_flags()

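The s390 huge_ptep_set_access_flags() hits form a compare-and-replace: read the current PTE back through vma->vm_mm, do nothing if it already matches, otherwise clear and re-set it, returning whether anything changed so the caller knows whether a flush is needed. A toy model (stand-in types, hypothetical _model names):

    #include <stdbool.h>
    #include <stdio.h>

    struct mm_struct { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };
    typedef unsigned long pte_t;

    static bool pte_same(pte_t a, pte_t b) { return a == b; }

    static pte_t huge_ptep_get_model(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep)
    {
        (void)mm; (void)addr;
        return *ptep;
    }

    static int huge_ptep_set_access_flags_model(struct vm_area_struct *vma,
                                                unsigned long addr,
                                                pte_t *ptep, pte_t pte)
    {
        int changed = !pte_same(huge_ptep_get_model(vma->vm_mm, addr, ptep), pte);

        if (changed) {
            *ptep = 0;    /* models huge_ptep_get_and_clear() */
            *ptep = pte;  /* models __set_huge_pte_at() */
        }
        return changed;   /* caller flushes only when something changed */
    }

    int main(void)
    {
        struct mm_struct mm = { .id = 5 };
        struct vm_area_struct vma = { .vm_mm = &mm };
        pte_t pte = 0x1;

        printf("changed: %d\n",
               huge_ptep_set_access_flags_model(&vma, 0x100000, &pte, 0x3));
        return 0;
    }
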
/linux-6.12.1/arch/arm/kernel/

smp_tlb.c
    202: on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,    in flush_tlb_page()
    206: broadcast_tlb_mm_a15_erratum(vma->vm_mm);    in flush_tlb_page()
    228: on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,    in flush_tlb_range()
    232: broadcast_tlb_mm_a15_erratum(vma->vm_mm);    in flush_tlb_range()

/linux-6.12.1/arch/m68k/include/asm/

tlbflush.h
    86: if (vma->vm_mm == current->active_mm)    in flush_tlb_page()
    93: if (vma->vm_mm == current->active_mm)    in flush_tlb_range()
    171: sun3_put_context(vma->vm_mm->context);    in flush_tlb_page()
    188: struct mm_struct *mm = vma->vm_mm;    in flush_tlb_range()

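Both m68k variants gate the flush on vma->vm_mm == current->active_mm: only the currently loaded context is in the MMU, so there is nothing to invalidate for any other mm at that point. A minimal sketch (stand-in types, hypothetical _model names):

    #include <stdio.h>

    struct mm_struct { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };
    struct task_struct { struct mm_struct *active_mm; };

    static struct task_struct current_task;       /* models 'current' */

    static void flush_tlb_page_model(struct vm_area_struct *vma,
                                     unsigned long addr)
    {
        /* Only the live context is in the MMU; for any other mm there
         * is nothing loaded, so the flush is a no-op here. */
        if (vma->vm_mm == current_task.active_mm)
            printf("flush %#lx in active mm %d\n", addr, vma->vm_mm->id);
    }

    int main(void)
    {
        struct mm_struct mm = { .id = 6 };
        struct vm_area_struct vma = { .vm_mm = &mm };

        current_task.active_mm = &mm;
        flush_tlb_page_model(&vma, 0x8000);
        return 0;
    }
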
/linux-6.12.1/arch/loongarch/mm/

tlb.c
    60: struct mm_struct *mm = vma->vm_mm;    in local_flush_tlb_range()
    117: if (asid_valid(vma->vm_mm, cpu)) {    in local_flush_tlb_page()
    120: newpid = cpu_asid(cpu, vma->vm_mm);    in local_flush_tlb_page()
    124: cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));    in local_flush_tlb_page()
    177: if (current->active_mm != vma->vm_mm)    in __update_tlb()

/linux-6.12.1/arch/um/include/asm/

tlbflush.h
    41: um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);    in flush_tlb_page()
    47: um_tlb_mark_sync(vma->vm_mm, start, end);    in flush_tlb_range()

/linux-6.12.1/arch/parisc/include/asm/

tlbflush.h
    20: __flush_tlb_range((vma)->vm_mm->context.space_id, start, end)
    67: purge_tlb_entries(vma->vm_mm, addr);    in flush_tlb_page()

/linux-6.12.1/arch/hexagon/mm/

vm_tlb.c
    29: struct mm_struct *mm = vma->vm_mm;    in flush_tlb_range()
    69: struct mm_struct *mm = vma->vm_mm;    in flush_tlb_page()

/linux-6.12.1/arch/mips/kernel/

smp.c
    583: struct mm_struct *mm = vma->vm_mm;    in flush_tlb_range()
    663: write_c0_memorymapid(cpu_asid(0, vma->vm_mm));    in flush_tlb_page()
    670: } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||    in flush_tlb_page()
    671: (current->mm != vma->vm_mm)) {    in flush_tlb_page()
    689: if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))    in flush_tlb_page()
    690: set_cpu_context(cpu, vma->vm_mm, 1);    in flush_tlb_page()