/linux-6.12.1/mm/ |
D | vma.c |
    1079  static inline void vms_clear_ptes(struct vma_munmap_struct *vms,   in vms_clear_ptes() argument
    1084  if (!vms->clear_ptes) /* Nothing to do */   in vms_clear_ptes()
    1093  tlb_gather_mmu(&tlb, vms->vma->vm_mm);   in vms_clear_ptes()
    1094  update_hiwater_rss(vms->vma->vm_mm);   in vms_clear_ptes()
    1095  unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,   in vms_clear_ptes()
    1096  vms->vma_count, mm_wr_locked);   in vms_clear_ptes()
    1100  free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,   in vms_clear_ptes()
    1101  vms->unmap_end, mm_wr_locked);   in vms_clear_ptes()
    1103  vms->clear_ptes = false;   in vms_clear_ptes()
    1106  void vms_clean_up_area(struct vma_munmap_struct *vms,   in vms_clean_up_area() argument
    [all …]
|
D | vma.h |
    179  static inline void init_vma_munmap(struct vma_munmap_struct *vms,   in init_vma_munmap() argument
    184  vms->vmi = vmi;   in init_vma_munmap()
    185  vms->vma = vma;   in init_vma_munmap()
    187  vms->start = start;   in init_vma_munmap()
    188  vms->end = end;   in init_vma_munmap()
    190  vms->start = vms->end = 0;   in init_vma_munmap()
    192  vms->unlock = unlock;   in init_vma_munmap()
    193  vms->uf = uf;   in init_vma_munmap()
    194  vms->vma_count = 0;   in init_vma_munmap()
    195  vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;   in init_vma_munmap()
    [all …]
|
D | percpu-vm.c |
    336  struct vm_struct **vms;   in pcpu_create_chunk() local
    342  vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,   in pcpu_create_chunk()
    344  if (!vms) {   in pcpu_create_chunk()
    349  chunk->data = vms;   in pcpu_create_chunk()
    350  chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];   in pcpu_create_chunk()
|
D | mmap.c |
    1369  struct vma_munmap_struct vms;   in __mmap_region() local
    1380  init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);   in __mmap_region()
    1386  error = vms_gather_munmap_vmas(&vms, &mas_detach);   in __mmap_region()
    1390  vmg.next = vms.next;   in __mmap_region()
    1391  vmg.prev = vms.prev;   in __mmap_region()
    1398  if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages)) {   in __mmap_region()
    1408  charged -= vms.nr_accounted;   in __mmap_region()
    1415  vms.nr_accounted = 0;   in __mmap_region()
    1426  vms_clean_up_area(&vms, &mas_detach);   in __mmap_region()
    1541  vms_complete_munmap_vmas(&vms, &mas_detach);   in __mmap_region()
    [all …]
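The __mmap_region() hits above cover the whole life of a struct vma_munmap_struct: init_vma_munmap() records the range being replaced, vms_gather_munmap_vmas() collects the overlapping VMAs into mas_detach, vms_clean_up_area() tears down their page tables, and vms_complete_munmap_vmas() finishes the teardown. A condensed sketch of that call order, assuming the mm-internal "vma.h" helpers; it only builds in-tree, error paths and the maple-tree setup behind mas_detach are omitted, and example_munmap_flow() is a made-up name:

	/*
	 * Sketch only: mirrors the call order visible in the __mmap_region()
	 * hits above.  Not the real mmap.c code.
	 */
	static void example_munmap_flow(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long addr, unsigned long end,
					struct list_head *uf,
					struct ma_state *mas_detach)
	{
		struct vma_munmap_struct vms;

		/* Record the range being replaced and whether to unlock later. */
		init_vma_munmap(&vms, vmi, vma, addr, end, uf, /* unlock = */ false);

		/* Collect the VMAs overlapping [addr, end) into mas_detach. */
		if (vms_gather_munmap_vmas(&vms, mas_detach))
			return;

		/* Tear down PTEs/page tables for the detached range ... */
		vms_clean_up_area(&vms, mas_detach);

		/* ... then finish the accounting and free the detached VMAs. */
		vms_complete_munmap_vmas(&vms, mas_detach);
	}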
|
D | vmalloc.c |
    4615  struct vm_struct **vms;   in pcpu_get_vm_areas() local
    4648  vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);   in pcpu_get_vm_areas()
    4650  if (!vas || !vms)   in pcpu_get_vm_areas()
    4655  vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);   in pcpu_get_vm_areas()
    4656  if (!vas[area] || !vms[area])   in pcpu_get_vm_areas()
    4755  setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,   in pcpu_get_vm_areas()
    4767  vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,   in pcpu_get_vm_areas()
    4768  vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);   in pcpu_get_vm_areas()
    4771  return vms;   in pcpu_get_vm_areas()
    4816  kfree(vms[area]);   in pcpu_get_vm_areas()
    [all …]
|
/linux-6.12.1/tools/testing/selftests/powerpc/tm/ |
D | tm-signal-context-chk-vmx.c |
    36  long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *v…
    41  vector int vms[] = {   variable
    65  &vms[i], sizeof(vector int));   in signal_usr1()
    75  printf("%08x", vms[i][j]);   in signal_usr1()
    83  &vms[NV_VMX_REGS + i], sizeof (vector int));   in signal_usr1()
    93  printf("%08x", vms[NV_VMX_REGS + i][j]);   in signal_usr1()
    125  rc = tm_signal_self_context_load(pid, NULL, NULL, vms, NULL);   in tm_signal_context_chk()
|
D | tm-signal-context-chk-fpu.c |
    35  long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *v…
|
D | tm-signal-context-chk-gpr.c |
    35  long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *v…
|
D | tm-signal-context-chk-vsx.c |
    37  long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *v…
|
/linux-6.12.1/tools/testing/selftests/kvm/ |
D | kvm_binary_stats_test.c |
    189  struct kvm_vm **vms;   in main() local
    213  vms = malloc(sizeof(vms[0]) * max_vm);   in main()
    214  TEST_ASSERT(vms, "Allocate memory for storing VM pointers");   in main()
    227  vms[i] = vm_create_barebones();   in main()
    229  vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j);   in main()
    241  vm_stats_fds = vm_get_stats_fd(vms[i]);   in main()
    245  stats_test(vm_get_stats_fd(vms[i]));   in main()
    259  kvm_vm_free(vms[i]);   in main()
    268  free(vms);   in main()
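The kvm_binary_stats_test.c hits follow the usual selftest idiom of building an array of VMs before exercising their stats file descriptors. A minimal sketch of that allocate/create/free pattern, assuming the selftest framework headers kvm_util.h and test_util.h; nr_vms and create_and_free_vms() are made-up names:

	/* Sketch only, assuming the kvm selftest framework headers. */
	#include <stdlib.h>

	#include "kvm_util.h"
	#include "test_util.h"

	static void create_and_free_vms(int nr_vms)
	{
		struct kvm_vm **vms;
		int i;

		/* Same allocation/assert pattern as kvm_binary_stats_test.c. */
		vms = malloc(sizeof(vms[0]) * nr_vms);
		TEST_ASSERT(vms, "Allocate memory for storing VM pointers");

		for (i = 0; i < nr_vms; i++)
			vms[i] = vm_create_barebones();	/* VM with no vCPUs or memslots */

		/* ... exercise vm_get_stats_fd(vms[i]) etc. here ... */

		for (i = 0; i < nr_vms; i++)
			kvm_vm_free(vms[i]);

		free(vms);
	}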
|
/linux-6.12.1/drivers/gpu/drm/panthor/ |
D | panthor_drv.c |
    799  ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);   in panthor_ioctl_vm_create()
    818  return panthor_vm_pool_destroy_vm(pfile->vms, args->id);   in panthor_ioctl_vm_destroy()
    841  vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);   in panthor_ioctl_bo_create()
    1065  vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);   in panthor_ioctl_tiler_heap_create()
    1111  vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);   in panthor_ioctl_tiler_heap_destroy()
    1139  vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);   in panthor_ioctl_vm_bind_async()
    1207  vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);   in panthor_ioctl_vm_bind_sync()
    1258  vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);   in panthor_ioctl_vm_get_state()
|
D | panthor_mmu.c |
    1606  if (!pfile->vms)   in panthor_vm_pool_destroy()
    1609  xa_for_each(&pfile->vms->xa, i, vm)   in panthor_vm_pool_destroy()
    1612  xa_destroy(&pfile->vms->xa);   in panthor_vm_pool_destroy()
    1613  kfree(pfile->vms);   in panthor_vm_pool_destroy()
    1624  pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);   in panthor_vm_pool_create()
    1625  if (!pfile->vms)   in panthor_vm_pool_create()
    1628  xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);   in panthor_vm_pool_create()
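The panthor_mmu.c hits outline the VM pool's lifecycle: a kzalloc'd wrapper around an XArray initialised with XA_FLAGS_ALLOC1, torn down by walking every entry before xa_destroy(). A generic sketch of that pattern, not the panthor code itself; struct example_vm, example_vm_put() and the example_pool_* helpers are placeholders:

	/* Sketch only -- a generic XA_FLAGS_ALLOC1 pool, not the panthor code. */
	#include <linux/xarray.h>
	#include <linux/slab.h>

	struct example_vm { int placeholder; };

	static void example_vm_put(struct example_vm *vm)
	{
		kfree(vm);			/* stand-in for the real reference drop */
	}

	struct example_pool {
		struct xarray xa;		/* id -> struct example_vm */
	};

	static struct example_pool *example_pool_create(void)
	{
		struct example_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

		if (!pool)
			return NULL;

		xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);	/* IDs allocated from 1 */
		return pool;
	}

	static int example_pool_add(struct example_pool *pool,
				    struct example_vm *vm, u32 *id)
	{
		return xa_alloc(&pool->xa, id, vm, xa_limit_32b, GFP_KERNEL);
	}

	static void example_pool_destroy(struct example_pool *pool)
	{
		struct example_vm *vm;
		unsigned long i;

		if (!pool)
			return;

		xa_for_each(&pool->xa, i, vm)	/* release every VM still in the pool */
			example_vm_put(vm);

		xa_destroy(&pool->xa);
		kfree(pool);
	}

Using XA_FLAGS_ALLOC1 means allocated IDs start at 1, which is convenient when 0 doubles as an "invalid handle" value handed back to userspace.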
|
D | panthor_device.h |
    175  struct panthor_vm_pool *vms;   member
|
D | panthor_sched.c |
    1820  struct panthor_vm *vms[MAX_CS_PER_CSG];   member
    1877  if (ctx->vms[i] == group->vm)   in tick_ctx_pick_groups_from_list()
    1893  ctx->vms[ctx->as_count++] = group->vm;   in tick_ctx_pick_groups_from_list()
    3147  group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);   in panthor_group_create()
|
/linux-6.12.1/include/linux/ |
D | vmalloc.h |
    304  void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
    315  pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)   in pcpu_free_vm_areas() argument
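The declaration at line 304 pairs with pcpu_get_vm_areas(), which percpu-vm.c uses to reserve one congruent vmalloc area per group (see pcpu_create_chunk() above). A sketch of that pairing, assuming CONFIG_SMP/CONFIG_MMU; example_reserve()/example_release() and their parameters are placeholders, and it only builds in-tree:

	/* Sketch only, modelled on pcpu_create_chunk(); not real kernel code. */
	#include <linux/vmalloc.h>

	static struct vm_struct **example_reserve(const unsigned long *offsets,
						  const size_t *sizes,
						  int nr_vms, size_t align)
	{
		struct vm_struct **vms;

		/* One congruent vmalloc area per group; NULL on failure. */
		vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
		if (!vms)
			return NULL;

		/* percpu-vm.c derives the chunk base from vms[0]->addr. */
		return vms;
	}

	static void example_release(struct vm_struct **vms, int nr_vms)
	{
		pcpu_free_vm_areas(vms, nr_vms);	/* undoes pcpu_get_vm_areas() */
	}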
|
/linux-6.12.1/tools/kvm/kvm_stat/ |
D | kvm_stat |
    846  vms = self.walkdir(PATH_DEBUGFS_KVM)[1]
    847  if len(vms) == 0:
    850  self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
|
/linux-6.12.1/Documentation/virt/kvm/ |
D | api.rst |
    5444  Not all PV vms can be dumped, the owner needs to set `dump
    7910  allows the in-guest workload to maintain its own NPTs and keeps the two vms
|