/linux-6.12.1/mm/

D | util.c
      519  unsigned long locked_vm, limit;  in __account_locked_vm() local
      524  locked_vm = mm->locked_vm;  in __account_locked_vm()
      528  if (locked_vm + pages > limit)  in __account_locked_vm()
      532  mm->locked_vm = locked_vm + pages;  in __account_locked_vm()
      534  WARN_ON_ONCE(pages > locked_vm);  in __account_locked_vm()
      535  mm->locked_vm = locked_vm - pages;  in __account_locked_vm()
      540  locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),  in __account_locked_vm()
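The util.c lines above are the core of __account_locked_vm(): read the current mm->locked_vm, refuse the request if adding the new pages would exceed the RLIMIT_MEMLOCK limit converted to pages, otherwise store the new total (or subtract on the unaccount path, with a warning if the counter would underflow). A minimal userspace sketch of the same check, assuming getrlimit() as a stand-in for the kernel's task_rlimit(); may_lock_pages() is an illustrative helper, not a kernel function:

/*
 * Userspace model of the check __account_locked_vm() performs:
 * the new locked page count must not exceed RLIMIT_MEMLOCK
 * (a byte limit, compared here in pages). Illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static bool may_lock_pages(unsigned long locked_vm, unsigned long pages)
{
        struct rlimit rlim;
        unsigned long limit;

        if (getrlimit(RLIMIT_MEMLOCK, &rlim))
                return false;

        /* RLIMIT_MEMLOCK is in bytes; the kernel compares page counts. */
        limit = rlim.rlim_cur == RLIM_INFINITY ?
                (unsigned long)-1 : rlim.rlim_cur / sysconf(_SC_PAGESIZE);

        return locked_vm + pages <= limit;
}

int main(void)
{
        printf("may lock 16 more pages: %s\n",
               may_lock_pages(0, 16) ? "yes" : "no");
        return 0;
}

In the kernel the counter stays a plain unsigned long because __account_locked_vm() runs with mmap_lock held, so the read-check-update sequence cannot race.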
D | vma.h
      47   unsigned long locked_vm;  /* Number of locked pages */  member
      195  vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;  in init_vma_munmap()

D | mlock.c
      495  mm->locked_vm += nr_pages;  in mlock_fixup()
      633  locked += current->mm->locked_vm;  in do_mlock()

D | debug.c
      223  mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,  in dump_mm()

D | mmap.c
      240   locked_pages += mm->locked_vm;  in mlock_future_ok()
      1118  mm->locked_vm += grow;  in expand_upwards()
      1210  mm->locked_vm += grow;  in expand_downwards()
      1550  mm->locked_vm += pglen;  in __mmap_region()
      1848  mm->locked_vm += (len >> PAGE_SHIFT);  in do_brk_flags()

D | mremap.c
      800   mm->locked_vm += new_len >> PAGE_SHIFT;  in move_vma()
      1151  mm->locked_vm += pages;  in SYSCALL_DEFINE5()

D | vma.c
      1137  mm->locked_vm -= vms->locked_vm;  in vms_complete_munmap_vmas()
      1245  vms->locked_vm += nrpages;  in vms_gather_munmap_vmas()
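Together with util.c, these are the writers that keep mm->locked_vm in step with the process's VM_LOCKED mappings: mlock_fixup() adds pages when a VMA gains VM_LOCKED; expand_upwards()/expand_downwards(), __mmap_region(), do_brk_flags() and move_vma() add pages as locked VMAs grow or move; and vms_complete_munmap_vmas() subtracts what vms_gather_munmap_vmas() counted when locked VMAs are unmapped. do_mlock() and mlock_future_ok() use the counter to enforce RLIMIT_MEMLOCK, which an unprivileged caller sees as ENOMEM from mlock(2). A small userspace demonstration of that failure mode (page size and limits are read at run time; nothing here is kernel API):

/*
 * Demonstrates the RLIMIT_MEMLOCK check that do_mlock() applies against
 * mm->locked_vm: an unprivileged mlock() that would push the locked page
 * count past the limit fails with ENOMEM. Illustrative userspace code.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        struct rlimit rlim;
        size_t len;
        void *p;

        if (getrlimit(RLIMIT_MEMLOCK, &rlim) || rlim.rlim_cur == RLIM_INFINITY) {
                puts("RLIMIT_MEMLOCK is unlimited; nothing to demonstrate");
                return 0;
        }

        /* Map one page more than the limit allows, then try to lock it all. */
        len = rlim.rlim_cur + sysconf(_SC_PAGESIZE);
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        if (mlock(p, len))
                printf("mlock(%zu bytes) failed: %s\n", len, strerror(errno));
        else
                puts("mlock succeeded (CAP_IPC_LOCK or a raised limit?)");

        munmap(p, len);
        return 0;
}

Privileged callers with CAP_IPC_LOCK bypass the limit, so this only fails as shown for an ordinary user.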
/linux-6.12.1/net/xdp/

D | xdp_umem.c
      35   atomic_long_sub(umem->npgs, &umem->user->locked_vm);  in xdp_umem_unaccount_pages()
      139  old_npgs = atomic_long_read(&umem->user->locked_vm);  in xdp_umem_account_pages()
      146  } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,  in xdp_umem_account_pages()
/linux-6.12.1/include/linux/sched/

D | user.h
      29  atomic_long_t locked_vm;  member
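The per-user counter is a separate thing: user_struct::locked_vm is an atomic_long_t charged by subsystems that pin pages on a user's behalf rather than mlock()ing a VMA. The xdp_umem.c lines above and the s390 KVM, io_uring, iommufd, skbuff and perf lines below all follow the same lock-free pattern: read the counter, give up if the new total would exceed the applicable memlock limit, then publish it with a compare-and-exchange, retrying if another task raced in between (newer call sites use atomic_long_try_cmpxchg(), older ones atomic_long_cmpxchg()). A userspace model of that loop with C11 atomics; the counter and the page limit are local stand-ins, not kernel objects:

/*
 * C11 model of the accounting loop used on user_struct::locked_vm
 * (cf. xdp_umem_account_pages() and the try_cmpxchg callers below).
 * The counter and limit below are stand-ins, not kernel state.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long user_locked_vm;       /* models user->locked_vm */
static const long memlock_limit = 1024;  /* page limit, made-up value */

static bool account_pages(long npgs)
{
        long old = atomic_load(&user_locked_vm);
        long new;

        do {
                new = old + npgs;
                if (new > memlock_limit)
                        return false;  /* would exceed the memlock limit */
                /* on failure, 'old' is reloaded and the loop retries */
        } while (!atomic_compare_exchange_weak(&user_locked_vm, &old, new));

        return true;
}

static void unaccount_pages(long npgs)
{
        atomic_fetch_sub(&user_locked_vm, npgs);  /* cf. atomic_long_sub() */
}

int main(void)
{
        printf("account 16 pages: %s\n", account_pages(16) ? "ok" : "over limit");
        unaccount_pages(16);
        return 0;
}

Doing the limit check and the update as one atomic step is the point: two tasks racing to account pages cannot both observe a count under the limit and then both push it past it.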
/linux-6.12.1/arch/s390/kvm/

D | pci.c
      199  atomic_long_sub(nr_pages, &user->locked_vm);  in unaccount_mem()
      212  cur_pages = atomic_long_read(&user->locked_vm);  in account_mem()
      216  } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,  in account_mem()
/linux-6.12.1/io_uring/

D | rsrc.h
      142  atomic_long_sub(nr_pages, &user->locked_vm);  in __io_unaccount_mem()

D | rsrc.c
      54  cur_pages = atomic_long_read(&user->locked_vm);  in __io_account_mem()
      59  } while (!atomic_long_try_cmpxchg(&user->locked_vm,  in __io_account_mem()
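Because these charges count against the owning user's RLIMIT_MEMLOCK, applications that register large amounts of pinned memory (io_uring fixed buffers, VFIO DMA mappings, AF_XDP UMEMs) often have to raise the limit first. A sketch that lifts the soft limit to whatever hard limit the administrator configured; raise_memlock_limit() is an illustrative helper, and raising the hard limit itself requires privilege:

/*
 * Raise the RLIMIT_MEMLOCK soft limit to the hard limit before doing
 * work that pins pages (io_uring registered buffers, VFIO DMA mappings,
 * AF_XDP UMEMs, ...). Illustrative userspace code.
 */
#include <stdio.h>
#include <sys/resource.h>

static int raise_memlock_limit(void)
{
        struct rlimit rlim;

        if (getrlimit(RLIMIT_MEMLOCK, &rlim))
                return -1;

        if (rlim.rlim_cur < rlim.rlim_max) {
                rlim.rlim_cur = rlim.rlim_max;  /* soft limit up to hard limit */
                if (setrlimit(RLIMIT_MEMLOCK, &rlim))
                        return -1;
        }
        return 0;
}

int main(void)
{
        if (raise_memlock_limit())
                perror("RLIMIT_MEMLOCK");
        else
                puts("RLIMIT_MEMLOCK soft limit raised to the hard limit");
        return 0;
}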
/linux-6.12.1/drivers/iommu/iommufd/

D | pages.c
      813  cur_pages = atomic_long_read(&pages->source_user->locked_vm);  in incr_user_locked_vm()
      818  } while (!atomic_long_try_cmpxchg(&pages->source_user->locked_vm,  in incr_user_locked_vm()
      825  if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages))  in decr_user_locked_vm()
      827  atomic_long_sub(npages, &pages->source_user->locked_vm);  in decr_user_locked_vm()
/linux-6.12.1/tools/testing/vma/

D | vma_internal.h
      175  unsigned long locked_vm;  /* Pages that have PG_mlocked set */  member
/linux-6.12.1/drivers/vfio/

D | vfio_iommu_type1.c
      101   size_t locked_vm;  member
      439   dma->locked_vm += npage;  in vfio_lock_acct()
      659   mm->locked_vm + lock_acct + 1 > limit) {  in vfio_pin_pages_remote()
      1513  long npage = dma->locked_vm;  in vfio_change_dma_owner()
/linux-6.12.1/include/linux/

D | mm_types.h
      906  unsigned long locked_vm;  /* Pages that have PG_mlocked set */  member
/linux-6.12.1/net/core/

D | skbuff.c
      1670  old_pg = atomic_long_read(&user->locked_vm);  in mm_account_pinned_pages()
      1675  } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));  in mm_account_pinned_pages()
      1691  atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);  in mm_unaccount_pinned_pages()
/linux-6.12.1/Documentation/driver-api/

D | vfio.rst
      636  mm::locked_vm counter to make sure we do not exceed the rlimit.
/linux-6.12.1/kernel/

D | fork.c
      1270  mm->locked_vm = 0;  in mm_init()
/linux-6.12.1/Documentation/mm/

D | unevictable-lru.rst
      363  VMAs against the task's "locked_vm".
/linux-6.12.1/kernel/events/

D | core.c
      6493  atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);  in perf_mmap_close()
      6568  &mmap_user->locked_vm);  in perf_mmap_close()
      6720  user_locked = atomic_long_read(&user->locked_vm);  in perf_mmap()
      6782  atomic_long_add(user_extra, &user->locked_vm);  in perf_mmap()
/linux-6.12.1/fs/proc/

D | task_mmu.c
      65  SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);  in task_mem()
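task_mem() is where mm->locked_vm becomes visible to userspace: it is printed as the VmLck field of /proc/<pid>/status, in kB. A small check that locks one page and reads the field back; the parsing below is ordinary /proc access, nothing kernel-internal:

/*
 * Lock one page, then read the VmLck line that task_mem() writes to
 * /proc/self/status; it should report at least one page worth of kB.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void print_vmlck(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "VmLck:", 6))
                        fputs(line, stdout);
        fclose(f);
}

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED || mlock(p, page)) {
                perror("mlock");
                return 1;
        }
        print_vmlck();          /* e.g. "VmLck:         4 kB" */
        munlock(p, page);
        munmap(p, page);
        return 0;
}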
|