Searched refs:vm_start (Results 1 – 25 of 285) sorted by relevance
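Most of the hits below reduce to two recurring idioms: the mapping length vma->vm_end - vma->vm_start (vm_end is exclusive) and the file page offset vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT). A minimal userspace sketch of both follows; struct vma_sketch, vma_size(), vma_page_offset(), and the 12-bit PAGE_SHIFT are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdio.h>

/*
 * Illustrative sketch only: a stripped-down stand-in for the kernel's
 * struct vm_area_struct (not the real definition), showing the two
 * idioms that dominate the matches below.
 */
struct vma_sketch {
	unsigned long vm_start;	/* first byte of the mapping */
	unsigned long vm_end;	/* first byte past the mapping */
	unsigned long vm_pgoff;	/* offset into the backing file, in pages */
};

#define PAGE_SHIFT 12	/* assumes 4 KiB pages for the example */

/* Mapping size: vm_end is exclusive, so the difference is exact. */
static unsigned long vma_size(const struct vma_sketch *vma)
{
	return vma->vm_end - vma->vm_start;
}

/* File page offset of an address that lies inside the mapping. */
static unsigned long vma_page_offset(const struct vma_sketch *vma,
				     unsigned long addr)
{
	return vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
}

int main(void)
{
	struct vma_sketch vma = { 0x1000, 0x5000, 2 };

	printf("size = %lu pages, pgoff(0x3000) = %lu\n",
	       vma_size(&vma) >> PAGE_SHIFT, vma_page_offset(&vma, 0x3000));
	return 0;
}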


/linux-6.12.1/mm/
nommu.c
103 return vma->vm_end - vma->vm_start; in kobjsize()
422 BUG_ON(last->vm_end <= last->vm_start); in validate_nommu_regions()
429 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
431 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
457 if (region->vm_start < pregion->vm_start) in add_nommu_region()
459 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
510 if (region->vm_top > region->vm_start) in __put_nommu_region()
520 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
573 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); in delete_vma_from_mm()
575 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in delete_vma_from_mm()
[all …]
vma.c
161 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); in vma_prepare()
164 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start, in vma_prepare()
249 uprobe_munmap(vp->remove, vp->remove->vm_start, in vma_complete()
306 if (!vmg->next || vmg->end != vmg->next->vm_start || in can_vma_merge_right()
353 unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, in unmap_region()
357 next ? next->vm_start : USER_PGTABLES_CEILING, in unmap_region()
374 WARN_ON(vma->vm_start >= addr); in __split_vma()
390 new->vm_start = addr; in __split_vma()
391 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
395 vma_iter_config(vmi, new->vm_start, new->vm_end); in __split_vma()
[all …]
vma.h
101 return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start); in vma_pgoff_offset()
157 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store_gfp()
160 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store_gfp()
458 vmi->mas.index > vma->vm_start)) { in vma_iter_store()
460 vmi->mas.index, vma->vm_start, vma->vm_start, in vma_iter_store()
464 vmi->mas.last < vma->vm_start)) { in vma_iter_store()
466 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, in vma_iter_store()
472 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store()
475 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store()
msync.c
73 if (start < vma->vm_start) { in SYSCALL_DEFINE3()
76 start = vma->vm_start; in SYSCALL_DEFINE3()
88 fstart = (start - vma->vm_start) + in SYSCALL_DEFINE3()
mremap.c
515 if (!for_stack && vma->vm_start != addr_to_align) in can_align_down()
519 if (for_stack && addr_masked >= vma->vm_start) in can_align_down()
526 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; in can_align_down()
687 if (vma->vm_start != old_addr) in move_vma()
713 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
753 if (vma->vm_start < old_addr) in move_vma()
754 account_start = vma->vm_start; in move_vma()
783 if (new_vma != vma && vma->vm_start == old_addr && in move_vma()
856 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
950 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
[all …]
mmap.c
168 if (!brkvma || brkvma->vm_start >= oldbrk) in SYSCALL_DEFINE1()
696 high_limit = tmp->vm_start; in unmapped_area_topdown()
1023 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
1049 VMA_ITERATOR(vmi, mm, vma->vm_start); in expand_upwards()
1077 vma_iter_config(&vmi, vma->vm_start, address); in expand_upwards()
1100 size = address - vma->vm_start; in expand_upwards()
1147 VMA_ITERATOR(vmi, mm, vma->vm_start); in expand_downwards()
1167 vma_iter_next_range_limit(&vmi, vma->vm_start); in expand_downwards()
1189 if (address < vma->vm_start) { in expand_downwards()
1193 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
[all …]
mlock.c
533 if (start > vma->vm_start) in apply_vma_lock_flags()
537 tmp = vma->vm_start; in apply_vma_lock_flags()
542 if (vma->vm_start != tmp) in apply_vma_lock_flags()
587 if (start > vma->vm_start) in count_mm_mlocked_page_nr()
588 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
590 count += end - vma->vm_start; in count_mm_mlocked_page_nr()
593 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
734 error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, in apply_mlockall_flags()
/linux-6.12.1/tools/testing/vma/
vma.c
60 ret->vm_start = start; in alloc_vma()
249 ASSERT_EQ(vma->vm_start, 0); in test_simple_merge()
280 ASSERT_EQ(vma->vm_start, 0x1000); in test_simple_modify()
292 ASSERT_EQ(vma->vm_start, 0); in test_simple_modify()
301 ASSERT_EQ(vma->vm_start, 0x1000); in test_simple_modify()
310 ASSERT_EQ(vma->vm_start, 0x2000); in test_simple_modify()
338 ASSERT_EQ(vma->vm_start, 0); in test_simple_expand()
359 ASSERT_EQ(vma->vm_start, 0); in test_simple_shrink()
442 ASSERT_EQ(vma->vm_start, 0); in test_merge_new()
459 ASSERT_EQ(vma->vm_start, 0); in test_merge_new()
[all …]
/linux-6.12.1/mm/damon/tests/
vaddr-kunit.h
28 mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1); in __link_vmas()
72 (struct vm_area_struct) {.vm_start = 10, .vm_end = 20}, in damon_test_three_regions_in_vmas()
73 (struct vm_area_struct) {.vm_start = 20, .vm_end = 25}, in damon_test_three_regions_in_vmas()
74 (struct vm_area_struct) {.vm_start = 200, .vm_end = 210}, in damon_test_three_regions_in_vmas()
75 (struct vm_area_struct) {.vm_start = 210, .vm_end = 220}, in damon_test_three_regions_in_vmas()
76 (struct vm_area_struct) {.vm_start = 300, .vm_end = 305}, in damon_test_three_regions_in_vmas()
77 (struct vm_area_struct) {.vm_start = 307, .vm_end = 330}, in damon_test_three_regions_in_vmas()
/linux-6.12.1/tools/testing/selftests/bpf/progs/
iters_task_vma.c
13 __u64 vm_start; member
34 vm_ranges[seen].vm_start = vma->vm_start; in iter_task_vma_for_each()
find_vma_fail1.c
5 #define vm_flags vm_start
17 vma->vm_start = 0xffffffffff600000; in write_vma()
bpf_iter_vma_offset.c
32 if (vma->vm_start <= address && vma->vm_end > address) { in get_vma_offset()
33 offset = address - vma->vm_start + (vma->vm_pgoff << page_shift); in get_vma_offset()
/linux-6.12.1/include/trace/events/
mmap.h
79 __field(unsigned long, vm_start)
86 __entry->vm_start = vma->vm_start;
92 (unsigned long) __entry->vm_start,
/linux-6.12.1/fs/proc/
task_nommu.c
35 size += region->vm_end - region->vm_start; in task_mem()
37 size = vma->vm_end - vma->vm_start; in task_mem()
89 vsize += vma->vm_end - vma->vm_start; in task_vsize()
109 size += region->vm_end - region->vm_start; in task_statm()
149 vma->vm_start, in nommu_vma_show()
184 *ppos = vma->vm_start; in proc_get_vma()
/linux-6.12.1/arch/powerpc/include/asm/
video.h
8 unsigned long vm_start, unsigned long vm_end, in pgprot_framebuffer() argument
11 return __phys_mem_access_prot(PHYS_PFN(offset), vm_end - vm_start, prot); in pgprot_framebuffer()
/linux-6.12.1/scripts/coccinelle/api/
vma_pages.cocci
22 * (vma->vm_end - vma->vm_start) >> PAGE_SHIFT
32 - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
44 (vma->vm_end@p - vma->vm_start) >> PAGE_SHIFT
/linux-6.12.1/drivers/accel/habanalabs/common/
memory_mgr.c
205 new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start); in hl_mmap_mem_buf_vm_close()
253 user_mem_size = vma->vm_end - vma->vm_start;
263 if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
266 if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
270 buf->behavior->topic, vma->vm_start);
/linux-6.12.1/arch/parisc/mm/
fault.c
130 if (tree->vm_start > addr) {
136 if (prev->vm_next->vm_start > addr)
262 vma->vm_start, vma->vm_end); in show_signal_msg()
296 if (!vma || address < vma->vm_start) { in do_page_fault()
373 address < vma->vm_start || address >= vma->vm_end) { in do_page_fault()
/linux-6.12.1/arch/x86/um/
mem_32.c
17 gate_vma.vm_start = FIXADDR_USER_START; in gate_vma_init()
49 return (addr >= vma->vm_start) && (addr < vma->vm_end); in in_gate_area()
/linux-6.12.1/arch/arc/kernel/
arc_hostlink.c
22 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in arc_hl_mmap()
23 vma->vm_end - vma->vm_start, in arc_hl_mmap()
/linux-6.12.1/drivers/gpu/drm/i915/gem/
i915_gem_mman.c
36 return vma->vm_start == addr && in __vma_matches()
37 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); in __vma_matches()
280 area->vm_start, area->vm_end - area->vm_start, in vm_fault_cpu()
303 unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ in set_address_limits() local
310 vm_start = area->vm_start >> PAGE_SHIFT; in set_address_limits()
319 start = vm_start; in set_address_limits()
324 start = max_t(long, start, vm_start); in set_address_limits()
332 *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT; in set_address_limits()
358 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; in vm_fault_gtt()
518 addr -= area->vm_start; in vm_access()
/linux-6.12.1/drivers/soc/qcom/
rmtfs_mem.c
136 if (vma->vm_end - vma->vm_start > rmtfs_mem->size) { in qcom_rmtfs_mem_mmap()
139 vma->vm_end, vma->vm_start, in qcom_rmtfs_mem_mmap()
140 (vma->vm_end - vma->vm_start), &rmtfs_mem->size); in qcom_rmtfs_mem_mmap()
146 vma->vm_start, in qcom_rmtfs_mem_mmap()
148 vma->vm_end - vma->vm_start, in qcom_rmtfs_mem_mmap()
/linux-6.12.1/arch/powerpc/platforms/book3s/
vas-api.c
438 fault = vmf_insert_pfn(vma, vma->vm_start, in vas_mmap_fault()
482 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { in coproc_mmap()
484 (vma->vm_end - vma->vm_start), PAGE_SIZE); in coproc_mmap()
532 rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, in coproc_mmap()
533 vma->vm_end - vma->vm_start, prot); in coproc_mmap()
536 vma->vm_start, rc); in coproc_mmap()
/linux-6.12.1/arch/powerpc/kernel/
proc_powerpc.c
33 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) in page_map_mmap()
36 remap_pfn_range(vma, vma->vm_start, in page_map_mmap()
/linux-6.12.1/drivers/xen/xenfs/
xenstored.c
36 size_t size = vma->vm_end - vma->vm_start; in xsd_kva_mmap()
41 if (remap_pfn_range(vma, vma->vm_start, in xsd_kva_mmap()