Lines Matching full:va (mm/vmalloc.c)

830  * All vmap_area objects in this tree are sorted by va->va_start
971 va_size(struct vmap_area *va) in va_size() argument
973 return (va->va_end - va->va_start); in va_size()
979 struct vmap_area *va; in get_subtree_max_size() local
981 va = rb_entry_safe(node, struct vmap_area, rb_node); in get_subtree_max_size()
982 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
1007 struct vmap_area *va; in __find_vmap_area() local
1009 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
1010 if (addr < va->va_start) in __find_vmap_area()
1012 else if (addr >= va->va_end) in __find_vmap_area()
1015 return va; in __find_vmap_area()
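
The lookup above descends a tree keyed by the half-open range [va_start, va_end). As a hedged illustration of the same descent, here is a minimal userspace sketch assuming a plain binary search tree in place of the kernel's rb-tree (struct and function names are illustrative, not the kernel's):

#include <stddef.h>

struct area {                          /* illustrative stand-in for struct vmap_area */
        unsigned long va_start;        /* inclusive */
        unsigned long va_end;          /* exclusive */
        struct area *left, *right;
};

/* Return the area containing @addr, or NULL; mirrors the descent in __find_vmap_area(). */
static struct area *find_area(struct area *root, unsigned long addr)
{
        while (root) {
                if (addr < root->va_start)
                        root = root->left;
                else if (addr >= root->va_end)
                        root = root->right;
                else
                        return root;   /* va_start <= addr < va_end */
        }
        return NULL;
}
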
1021 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
1025 struct vmap_area *va = NULL; in __find_vmap_area_exceed_addr() local
1035 va = tmp; in __find_vmap_area_exceed_addr()
1044 return va; in __find_vmap_area_exceed_addr()
1048 * Returns a node where the first VA that satisfies addr < va_end resides.
1050 * VA no longer needs to be accessed.
1055 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) in find_vmap_area_exceed_addr_lock() argument
1066 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1068 if (*va) in find_vmap_area_exceed_addr_lock()
1069 if (!va_start_lowest || (*va)->va_start < va_start_lowest) in find_vmap_area_exceed_addr_lock()
1070 va_start_lowest = (*va)->va_start; in find_vmap_area_exceed_addr_lock()
1075 * Check if the found VA still exists; it might have gone away. In this case we in find_vmap_area_exceed_addr_lock()
1076 * repeat the search because a VA has been removed concurrently and we in find_vmap_area_exceed_addr_lock()
1083 *va = __find_vmap_area(va_start_lowest, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1085 if (*va) in find_vmap_area_exceed_addr_lock()
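
The wrapper above locks one vmap node at a time, remembers the lowest matching start address, and re-confirms it under the owning node's lock before returning. The per-tree search it relies on keeps the best candidate while descending; below is a minimal userspace sketch of that descent only, again assuming a plain BST with illustrative names:

#include <stddef.h>

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
        struct area *left, *right;
};

/* Lowest-addressed area with addr < va_end, or NULL (cf. __find_vmap_area_exceed_addr()). */
static struct area *find_area_exceed(struct area *root, unsigned long addr)
{
        struct area *best = NULL;

        while (root) {
                if (root->va_end > addr) {
                        best = root;           /* candidate; keep looking to the left */
                        if (root->va_start <= addr)
                                break;         /* addr is inside: no lower match exists */
                        root = root->left;
                } else {
                        root = root->right;
                }
        }
        return best;
}
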
1104 find_va_links(struct vmap_area *va, in find_va_links() argument
1124 * it link, where the new va->rb_node will be attached to. in find_va_links()
1134 if (va->va_end <= tmp_va->va_start) in find_va_links()
1136 else if (va->va_start >= tmp_va->va_end) in find_va_links()
1140 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
1157 * The red-black tree where we try to find VA neighbors in get_va_next_sibling()
1169 __link_va(struct vmap_area *va, struct rb_root *root, in __link_va() argument
1174 * VA is still not in the list, but we can in __link_va()
1184 rb_link_node(&va->rb_node, parent, link); in __link_va()
1188 * to the tree. We do not set va->subtree_max_size to in __link_va()
1197 rb_insert_augmented(&va->rb_node, in __link_va()
1199 va->subtree_max_size = 0; in __link_va()
1201 rb_insert_color(&va->rb_node, root); in __link_va()
1205 list_add(&va->list, head); in __link_va()
1209 link_va(struct vmap_area *va, struct rb_root *root, in link_va() argument
1213 __link_va(va, root, parent, link, head, false); in link_va()
1217 link_va_augment(struct vmap_area *va, struct rb_root *root, in link_va_augment() argument
1221 __link_va(va, root, parent, link, head, true); in link_va_augment()
1225 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) in __unlink_va() argument
1227 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in __unlink_va()
1231 rb_erase_augmented(&va->rb_node, in __unlink_va()
1234 rb_erase(&va->rb_node, root); in __unlink_va()
1236 list_del_init(&va->list); in __unlink_va()
1237 RB_CLEAR_NODE(&va->rb_node); in __unlink_va()
1241 unlink_va(struct vmap_area *va, struct rb_root *root) in unlink_va() argument
1243 __unlink_va(va, root, false); in unlink_va()
1247 unlink_va_augment(struct vmap_area *va, struct rb_root *root) in unlink_va_augment() argument
1249 __unlink_va(va, root, true); in unlink_va_augment()
1257 compute_subtree_max_size(struct vmap_area *va) in compute_subtree_max_size() argument
1259 return max3(va_size(va), in compute_subtree_max_size()
1260 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
1261 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
1267 struct vmap_area *va; in augment_tree_propagate_check() local
1270 list_for_each_entry(va, &free_vmap_area_list, list) { in augment_tree_propagate_check()
1271 computed_size = compute_subtree_max_size(va); in augment_tree_propagate_check()
1272 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
1274 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
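
The value being verified here is the largest free-area size anywhere in a node's subtree. A small userspace sketch of the recurrence follows, assuming a plain recursive tree walk instead of the kernel's augmented rb-tree callbacks (names illustrative):

#include <stdio.h>

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
        unsigned long subtree_max_size;        /* largest free size in this subtree */
        struct area *left, *right;
};

static unsigned long area_size(const struct area *a)
{
        return a->va_end - a->va_start;
}

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
        unsigned long m = a > b ? a : b;

        return m > c ? m : c;
}

/* Recompute bottom-up and report mismatches, like augment_tree_propagate_check(). */
static unsigned long check_subtree_max(const struct area *a)
{
        unsigned long computed;

        if (!a)
                return 0;

        computed = max3(area_size(a),
                        check_subtree_max(a->left),
                        check_subtree_max(a->right));

        if (computed != a->subtree_max_size)
                fprintf(stderr, "bogus subtree_max_size: %lu vs %lu\n",
                        computed, a->subtree_max_size);

        return computed;
}
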
1281 * levels starting from the VA point. The propagation must be done
1282 * when a VA size is modified by changing its va_start/va_end, or
1283 * when a new VA is inserted into the tree.
1286 * - After a VA has been inserted into the tree (free path);
1287 * - After a VA has been shrunk (allocation path);
1288 * - After a VA has been increased (merging path).
1307 augment_tree_propagate_from(struct vmap_area *va) in augment_tree_propagate_from() argument
1314 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
1322 insert_vmap_area(struct vmap_area *va, in insert_vmap_area() argument
1328 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area()
1330 link_va(va, root, parent, link, head); in insert_vmap_area()
1334 insert_vmap_area_augment(struct vmap_area *va, in insert_vmap_area_augment() argument
1342 link = find_va_links(va, NULL, from, &parent); in insert_vmap_area_augment()
1344 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area_augment()
1347 link_va_augment(va, root, parent, link, head); in insert_vmap_area_augment()
1348 augment_tree_propagate_from(va); in insert_vmap_area_augment()
1353 * Merge de-allocated chunk of VA memory with previous
1355 * free area is inserted. If VA has been merged, it is
1364 __merge_or_add_vmap_area(struct vmap_area *va, in __merge_or_add_vmap_area() argument
1374 * Find a place in the tree where VA potentially will be in __merge_or_add_vmap_area()
1377 link = find_va_links(va, root, NULL, &parent); in __merge_or_add_vmap_area()
1382 * Get next node of VA to check if merging can be done. in __merge_or_add_vmap_area()
1391 * |<------VA------>|<-----Next----->| in __merge_or_add_vmap_area()
1397 if (sibling->va_start == va->va_end) { in __merge_or_add_vmap_area()
1398 sibling->va_start = va->va_start; in __merge_or_add_vmap_area()
1401 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1404 va = sibling; in __merge_or_add_vmap_area()
1412 * |<-----Prev----->|<------VA------>| in __merge_or_add_vmap_area()
1418 if (sibling->va_end == va->va_start) { in __merge_or_add_vmap_area()
1427 __unlink_va(va, root, augment); in __merge_or_add_vmap_area()
1429 sibling->va_end = va->va_end; in __merge_or_add_vmap_area()
1432 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1435 va = sibling; in __merge_or_add_vmap_area()
1442 __link_va(va, root, parent, link, head, augment); in __merge_or_add_vmap_area()
1444 return va; in __merge_or_add_vmap_area()
1448 merge_or_add_vmap_area(struct vmap_area *va, in merge_or_add_vmap_area() argument
1451 return __merge_or_add_vmap_area(va, root, head, false); in merge_or_add_vmap_area()
1455 merge_or_add_vmap_area_augment(struct vmap_area *va, in merge_or_add_vmap_area_augment() argument
1458 va = __merge_or_add_vmap_area(va, root, head, true); in merge_or_add_vmap_area_augment()
1459 if (va) in merge_or_add_vmap_area_augment()
1460 augment_tree_propagate_from(va); in merge_or_add_vmap_area_augment()
1462 return va; in merge_or_add_vmap_area_augment()
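
The two diagrams above cover coalescing a freed range with its right and left neighbours. Below is a compact userspace analogue of that decision, assuming free areas sit in an address-sorted doubly linked list whose neighbours the caller has already located; list-head updates and overlap checks are omitted, and the kernel additionally maintains the rb-tree and, on the augmented path, re-propagates subtree_max_size:

#include <stdlib.h>

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
        struct area *prev, *next;              /* address-sorted free list */
};

/*
 * Free @va into the list, given its would-be neighbours @prev and @next
 * (either may be NULL). Merge right when next->va_start == va->va_end,
 * merge left when prev->va_end == va->va_start.
 */
static struct area *merge_or_add(struct area *va, struct area *prev, struct area *next)
{
        if (next && next->va_start == va->va_end) {
                /* |<------VA------>|<-----Next----->| */
                next->va_start = va->va_start;
                free(va);
                va = next;
        } else {
                /* No right merge: link @va between @prev and @next. */
                va->prev = prev;
                va->next = next;
                if (prev)
                        prev->next = va;
                if (next)
                        next->prev = va;
        }

        if (prev && prev->va_end == va->va_start) {
                /* |<-----Prev----->|<------VA------>| */
                prev->va_end = va->va_end;
                prev->next = va->next;         /* unlink and drop the absorbed area */
                if (va->next)
                        va->next->prev = prev;
                free(va);
                va = prev;
        }
        return va;
}
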
1466 is_within_this_va(struct vmap_area *va, unsigned long size, in is_within_this_va() argument
1471 if (va->va_start > vstart) in is_within_this_va()
1472 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
1481 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
1495 struct vmap_area *va; in find_vmap_lowest_match() local
1506 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1509 vstart < va->va_start) { in find_vmap_lowest_match()
1512 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1513 return va; in find_vmap_lowest_match()
1532 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1533 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1534 return va; in find_vmap_lowest_match()
1537 vstart <= va->va_start) { in find_vmap_lowest_match()
1544 vstart = va->va_start + 1; in find_vmap_lowest_match()
1562 struct vmap_area *va; in find_vmap_lowest_linear_match() local
1564 list_for_each_entry(va, head, list) { in find_vmap_lowest_linear_match()
1565 if (!is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_linear_match()
1568 return va; in find_vmap_lowest_linear_match()
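
The linear variant above is the reference the tree search is validated against: walk the address-sorted free areas and return the first one in which an aligned block of the requested size fits at or after vstart. A hedged userspace sketch of that fit test and scan, using an array instead of the kernel's list and illustrative names:

#include <stddef.h>

#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
};

/* Can an @align-aligned block of @size bytes start in @a at or after @vstart? */
static int fits(const struct area *a, unsigned long size,
                unsigned long align, unsigned long vstart)
{
        unsigned long start = a->va_start > vstart ? a->va_start : vstart;

        start = ALIGN_UP(start, align);
        /* Guard against overflow of start + size, as is_within_this_va() does. */
        return start + size > start && start + size <= a->va_end;
}

/* First (lowest-address) fitting area, cf. find_vmap_lowest_linear_match(). */
static const struct area *lowest_match(const struct area *sorted, size_t n,
                unsigned long size, unsigned long align, unsigned long vstart)
{
        for (size_t i = 0; i < n; i++)
                if (fits(&sorted[i], size, align, vstart))
                        return &sorted[i];

        /* The tree search reaches the same answer but prunes subtrees
         * whose subtree_max_size is smaller than the request. */
        return NULL;
}
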
1603 classify_va_fit_type(struct vmap_area *va, in classify_va_fit_type() argument
1608 /* Check if it is within VA. */ in classify_va_fit_type()
1609 if (nva_start_addr < va->va_start || in classify_va_fit_type()
1610 nva_start_addr + size > va->va_end) in classify_va_fit_type()
1614 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
1615 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
1619 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
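
The classification above records how the requested block [nva_start_addr, nva_start_addr + size) sits inside a free VA. Here is a userspace sketch of the four outcomes, reusing the kernel's fit-type names for clarity (the surrounding struct is illustrative):

enum fit_type {
        NOTHING_FIT,    /* request does not lie fully inside the area */
        FL_FIT_TYPE,    /* full fit: the whole area is consumed */
        LE_FIT_TYPE,    /* left edge: area shrinks from the left */
        RE_FIT_TYPE,    /* right edge: area shrinks from the right */
        NE_FIT_TYPE,    /* no edge: area must be split in two */
};

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
};

static enum fit_type classify_fit(const struct area *a,
                unsigned long start, unsigned long size)
{
        if (start < a->va_start || start + size > a->va_end)
                return NOTHING_FIT;

        if (start == a->va_start)
                return start + size == a->va_end ? FL_FIT_TYPE : LE_FIT_TYPE;

        return start + size == a->va_end ? RE_FIT_TYPE : NE_FIT_TYPE;
}
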
1630 struct vmap_area *va, unsigned long nva_start_addr, in va_clip() argument
1634 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); in va_clip()
1638 * No need to split the VA; it fully fits. in va_clip()
1644 unlink_va_augment(va, root); in va_clip()
1645 kmem_cache_free(vmap_area_cachep, va); in va_clip()
1648 * Split left edge of fit VA. in va_clip()
1654 va->va_start += size; in va_clip()
1657 * Split right edge of fit VA. in va_clip()
1663 va->va_end = nva_start_addr; in va_clip()
1666 * Split no edge of fit VA. in va_clip()
1707 lva->va_start = va->va_start; in va_clip()
1711 * Shrink this VA to remaining size. in va_clip()
1713 va->va_start = nva_start_addr + size; in va_clip()
1719 augment_tree_propagate_from(va); in va_clip()
1722 insert_vmap_area_augment(lva, &va->rb_node, root, head); in va_clip()
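
Putting the fit types to work, the clipping step either consumes the free area, trims one edge, or splits it in two. A rough userspace sketch of just that step follows, with malloc standing in for the kernel's vmap_area_cachep (the kernel prefers a per-CPU preloaded node here) and all tree/list bookkeeping left out; it assumes the request was already classified as fitting:

#include <stdlib.h>

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
};

/*
 * Carve [start, start + size) out of free area @a, which is assumed to
 * contain the whole range. On a "no edge" split the left remainder is
 * returned through @left; @consumed is set when @a is used up entirely.
 * Returns 0 on success, -1 if the split could not allocate a node.
 */
static int clip_area(struct area *a, unsigned long start, unsigned long size,
                     int *consumed, struct area **left)
{
        *consumed = 0;
        *left = NULL;

        if (start == a->va_start && start + size == a->va_end) {
                *consumed = 1;                 /* FL: drop the whole area */
        } else if (start == a->va_start) {
                a->va_start += size;           /* LE: trim from the left */
        } else if (start + size == a->va_end) {
                a->va_end = start;             /* RE: trim from the right */
        } else {                               /* NE: split in two */
                *left = malloc(sizeof(**left));
                if (!*left)
                        return -1;
                (*left)->va_start = a->va_start;
                (*left)->va_end = start;
                a->va_start = start + size;
        }
        return 0;
}
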
1729 va_alloc(struct vmap_area *va, in va_alloc() argument
1737 if (va->va_start > vstart) in va_alloc()
1738 nva_start_addr = ALIGN(va->va_start, align); in va_alloc()
1747 ret = va_clip(root, head, va, nva_start_addr, size); in va_alloc()
1765 struct vmap_area *va; in __alloc_vmap_area() local
1779 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); in __alloc_vmap_area()
1780 if (unlikely(!va)) in __alloc_vmap_area()
1783 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); in __alloc_vmap_area()
1797 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
1799 struct vmap_node *vn = addr_to_node(va->va_start); in free_vmap_area()
1805 unlink_va(va, &vn->busy.root); in free_vmap_area()
1812 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); in free_vmap_area()
1819 struct vmap_area *va = NULL, *tmp; in preload_this_cpu_lock() local
1831 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in preload_this_cpu_lock()
1836 if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) in preload_this_cpu_lock()
1837 kmem_cache_free(vmap_area_cachep, va); in preload_this_cpu_lock()
1852 node_pool_add_va(struct vmap_node *n, struct vmap_area *va) in node_pool_add_va() argument
1856 vp = size_to_va_pool(n, va_size(va)); in node_pool_add_va()
1861 list_add(&va->list, &vp->head); in node_pool_add_va()
1873 struct vmap_area *va = NULL; in node_pool_del_va() local
1883 va = list_first_entry(&vp->head, struct vmap_area, list); in node_pool_del_va()
1885 if (IS_ALIGNED(va->va_start, align)) { in node_pool_del_va()
1890 err |= (va_size(va) != size); in node_pool_del_va()
1891 err |= (va->va_start < vstart); in node_pool_del_va()
1892 err |= (va->va_end > vend); in node_pool_del_va()
1895 list_del_init(&va->list); in node_pool_del_va()
1898 va = NULL; in node_pool_del_va()
1901 list_move_tail(&va->list, &vp->head); in node_pool_del_va()
1902 va = NULL; in node_pool_del_va()
1907 return va; in node_pool_del_va()
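
The pool lookup above serves fixed-size requests from per-node caches indexed by the size in pages. Below is a simplified userspace sketch of the pop path, assuming one singly linked list per size class and illustrative constants; the kernel additionally re-checks vstart/vend and rotates unusable entries to the list tail:

#include <stddef.h>

#define PAGE_SHIFT              12     /* illustrative; matches common configs */
#define MAX_VA_SIZE_PAGES       256    /* illustrative size-class cap */

struct area {
        unsigned long va_start, va_end;
        struct area *next;
};

struct va_pool {
        struct area *head;             /* cached areas of one size class */
        unsigned long len;
};

/* Pop a cached area of exactly @size bytes whose start is @align-aligned. */
static struct area *pool_del_va(struct va_pool *pools, unsigned long size,
                                unsigned long align)
{
        unsigned long idx = (size >> PAGE_SHIFT) - 1;  /* size is page-sized here */
        struct va_pool *vp;
        struct area *a;

        if (idx >= MAX_VA_SIZE_PAGES)
                return NULL;                           /* too big for the cache */

        vp = &pools[idx];
        a = vp->head;
        if (!a || (a->va_start & (align - 1)))
                return NULL;                           /* no rotation to the tail in this sketch */

        vp->head = a->next;
        vp->len--;
        a->next = NULL;
        return a;
}
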
1915 struct vmap_area *va; in node_alloc() local
1929 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); in node_alloc()
1932 if (va) in node_alloc()
1933 *addr = va->va_start; in node_alloc()
1935 return va; in node_alloc()
1939 struct vmap_area *va, unsigned long flags, const void *caller) in setup_vmalloc_vm() argument
1942 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1943 vm->size = va_size(va); in setup_vmalloc_vm()
1945 va->vm = vm; in setup_vmalloc_vm()
1959 struct vmap_area *va; in alloc_vmap_area() local
1975 * If a VA is obtained from a global heap (if it fails here) in alloc_vmap_area()
1980 * On success a ready-to-go VA is returned. in alloc_vmap_area()
1982 va = node_alloc(size, align, vstart, vend, &addr, &vn_id); in alloc_vmap_area()
1983 if (!va) { in alloc_vmap_area()
1986 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in alloc_vmap_area()
1987 if (unlikely(!va)) in alloc_vmap_area()
1994 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
2014 va->va_start = addr; in alloc_vmap_area()
2015 va->va_end = addr + size; in alloc_vmap_area()
2016 va->vm = NULL; in alloc_vmap_area()
2017 va->flags = (va_flags | vn_id); in alloc_vmap_area()
2020 vm->addr = (void *)va->va_start; in alloc_vmap_area()
2021 vm->size = va_size(va); in alloc_vmap_area()
2022 va->vm = vm; in alloc_vmap_area()
2025 vn = addr_to_node(va->va_start); in alloc_vmap_area()
2028 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in alloc_vmap_area()
2031 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
2032 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
2033 BUG_ON(va->va_end > vend); in alloc_vmap_area()
2037 free_vmap_area(va); in alloc_vmap_area()
2041 return va; in alloc_vmap_area()
2062 kmem_cache_free(vmap_area_cachep, va); in alloc_vmap_area()
2119 struct vmap_area *va, *n; in reclaim_list_global() local
2125 list_for_each_entry_safe(va, n, head, list) in reclaim_list_global()
2126 merge_or_add_vmap_area_augment(va, in reclaim_list_global()
2136 struct vmap_area *va, *nva; in decay_va_pool_node() local
2157 list_for_each_entry_safe(va, nva, &tmp_list, list) { in decay_va_pool_node()
2158 list_del_init(&va->list); in decay_va_pool_node()
2159 merge_or_add_vmap_area(va, &decay_root, &decay_list); in decay_va_pool_node()
2190 struct vmap_area *va, *n_va; in purge_vmap_node() local
2195 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { in purge_vmap_node()
2196 unsigned long nr = va_size(va) >> PAGE_SHIFT; in purge_vmap_node()
2197 unsigned long orig_start = va->va_start; in purge_vmap_node()
2198 unsigned long orig_end = va->va_end; in purge_vmap_node()
2199 unsigned int vn_id = decode_vn_id(va->flags); in purge_vmap_node()
2201 list_del_init(&va->list); in purge_vmap_node()
2205 va->va_start, va->va_end); in purge_vmap_node()
2211 if (node_pool_add_va(vn, va)) in purge_vmap_node()
2215 list_add(&va->list, &local_list); in purge_vmap_node()
2331 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
2334 unsigned long va_start = va->va_start; in free_vmap_area_noflush()
2335 unsigned int vn_id = decode_vn_id(va->flags); in free_vmap_area_noflush()
2339 if (WARN_ON_ONCE(!list_empty(&va->list))) in free_vmap_area_noflush()
2342 nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT, in free_vmap_area_noflush()
2350 id_to_node(vn_id):addr_to_node(va->va_start); in free_vmap_area_noflush()
2353 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); in free_vmap_area_noflush()
2358 /* After this point, we may free va at any time */ in free_vmap_area_noflush()
2366 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
2368 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
2369 vunmap_range_noflush(va->va_start, va->va_end); in free_unmap_vmap_area()
2371 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
2373 free_vmap_area_noflush(va); in free_unmap_vmap_area()
2379 struct vmap_area *va; in find_vmap_area() local
2387 * where a VA is located. If VA spans several zones and passed in find_vmap_area()
2388 * addr is not the same as va->va_start, which is not common, we in find_vmap_area()
2391 * <----va----> in find_vmap_area()
2395 * VA resides in node 1 whereas it spans 1, 2 and 0. If passed in find_vmap_area()
2403 va = __find_vmap_area(addr, &vn->busy.root); in find_vmap_area()
2406 if (va) in find_vmap_area()
2407 return va; in find_vmap_area()
2416 struct vmap_area *va; in find_unlink_vmap_area() local
2427 va = __find_vmap_area(addr, &vn->busy.root); in find_unlink_vmap_area()
2428 if (va) in find_unlink_vmap_area()
2429 unlink_va(va, &vn->busy.root); in find_unlink_vmap_area()
2432 if (va) in find_unlink_vmap_area()
2433 return va; in find_unlink_vmap_area()
2493 struct vmap_area *va; member
2593 struct vmap_area *va; in new_vmap_block() local
2606 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
2610 if (IS_ERR(va)) { in new_vmap_block()
2612 return ERR_CAST(va); in new_vmap_block()
2615 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
2617 vb->va = va; in new_vmap_block()
2629 xa = addr_to_vb_xa(va->va_start); in new_vmap_block()
2630 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
2634 free_vmap_area(va); in new_vmap_block()
2658 xa = addr_to_vb_xa(vb->va->va_start); in free_vmap_block()
2659 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
2662 vn = addr_to_node(vb->va->va_start); in free_vmap_block()
2664 unlink_va(vb->va, &vn->busy.root); in free_vmap_block()
2667 free_vmap_area_noflush(vb->va); in free_vmap_block()
2772 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
2862 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
2919 struct vmap_area *va; in vm_unmap_ram() local
2935 va = find_unlink_vmap_area(addr); in vm_unmap_ram()
2936 if (WARN_ON_ONCE(!va)) in vm_unmap_ram()
2939 debug_check_no_locks_freed((void *)va->va_start, va_size(va)); in vm_unmap_ram()
2940 free_unmap_vmap_area(va); in vm_unmap_ram()
2970 struct vmap_area *va; in vm_map_ram() local
2971 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
2975 if (IS_ERR(va)) in vm_map_ram()
2978 addr = va->va_start; in vm_map_ram()
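
For callers, these are the paths behind vm_map_ram() and vm_unmap_ram(): small requests come from per-CPU vmap blocks, larger ones take the alloc_vmap_area() route shown above. A hedged kernel-module-style sketch of typical usage (illustrative helper names, error handling trimmed):

#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

/* Map two already-allocated pages contiguously in vmalloc space. */
static void *map_two_pages(struct page **pages)
{
        /*
         * Small page counts are served from per-CPU vmap blocks; larger
         * ones go through alloc_vmap_area() as in the lines above.
         */
        return vm_map_ram(pages, 2, NUMA_NO_NODE);
}

static void unmap_two_pages(void *vaddr)
{
        /* The count must match the one passed to vm_map_ram(). */
        vm_unmap_ram(vaddr, 2);
}
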
3093 struct vmap_area *va; in __get_vm_area_node() local
3116 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); in __get_vm_area_node()
3117 if (IS_ERR(va)) { in __get_vm_area_node()
3184 struct vmap_area *va; in find_vm_area() local
3186 va = find_vmap_area((unsigned long)addr); in find_vm_area()
3187 if (!va) in find_vm_area()
3190 return va->vm; in find_vm_area()
3205 struct vmap_area *va; in remove_vm_area() local
3214 va = find_unlink_vmap_area((unsigned long)addr); in remove_vm_area()
3215 if (!va || !va->vm) in remove_vm_area()
3217 vm = va->vm; in remove_vm_area()
3224 free_unmap_vmap_area(va); in remove_vm_area()
4257 start = vmap_block_vaddr(vb->va->va_start, rs); in vmap_ram_vread_iter()
4323 struct vmap_area *va; in vread_iter() local
4337 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); in vread_iter()
4342 if ((unsigned long)addr + remains <= va->va_start) in vread_iter()
4351 vm = va->vm; in vread_iter()
4352 flags = va->flags & VMAP_FLAGS_MASK; in vread_iter()
4368 vaddr = (char *) va->va_start; in vread_iter()
4369 size = vm ? get_vm_area_size(vm) : va_size(va); in vread_iter()
4403 next = va->va_end; in vread_iter()
4405 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); in vread_iter()
4528 * i.e. va->va_start < addr && va->va_end < addr or NULL
4534 struct vmap_area *va, *tmp; in pvm_find_va_enclose_addr() local
4538 va = NULL; in pvm_find_va_enclose_addr()
4543 va = tmp; in pvm_find_va_enclose_addr()
4553 return va; in pvm_find_va_enclose_addr()
4559 * @va:
4560 * in - the VA we start the search from (reverse order);
4561 * out - the VA with the highest aligned end address.
4567 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) in pvm_determine_end_from_reverse() argument
4572 if (likely(*va)) { in pvm_determine_end_from_reverse()
4573 list_for_each_entry_from_reverse((*va), in pvm_determine_end_from_reverse()
4575 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
4576 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
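
The helper above walks the free list from high to low addresses and clamps each candidate's end down to an alignment boundary. A tiny userspace sketch of that walk over an address-sorted array, assuming a power-of-two align and illustrative names:

#include <stddef.h>

struct area {
        unsigned long va_start, va_end;        /* [va_start, va_end) */
};

/*
 * Scan free areas from high to low addresses and return the highest
 * @align-aligned end address that still falls inside one of them,
 * clamped to @vmalloc_end; 0 if none. Mirrors the arithmetic in
 * pvm_determine_end_from_reverse().
 */
static unsigned long highest_aligned_end(const struct area *sorted, size_t n,
                unsigned long align, unsigned long vmalloc_end)
{
        for (size_t i = n; i-- > 0; ) {
                unsigned long addr = sorted[i].va_end & ~(align - 1);

                if (addr > vmalloc_end)
                        addr = vmalloc_end;
                if (sorted[i].va_start < addr)
                        return addr;
        }
        return 0;
}
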
4614 struct vmap_area **vas, *va; in pcpu_get_vm_areas() local
4667 va = pvm_find_va_enclose_addr(vmalloc_end); in pcpu_get_vm_areas()
4668 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4681 if (va == NULL) in pcpu_get_vm_areas()
4685 * If required width exceeds current VA block, move in pcpu_get_vm_areas()
4688 if (base + end > va->va_end) { in pcpu_get_vm_areas()
4689 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4695 * If this VA does not fit, move base downwards and recheck. in pcpu_get_vm_areas()
4697 if (base + start < va->va_start) { in pcpu_get_vm_areas()
4698 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
4699 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4714 va = pvm_find_va_enclose_addr(base + end); in pcpu_get_vm_areas()
4717 /* we've found a fitting base, insert all va's */ in pcpu_get_vm_areas()
4724 va = pvm_find_va_enclose_addr(start); in pcpu_get_vm_areas()
4725 if (WARN_ON_ONCE(va == NULL)) in pcpu_get_vm_areas()
4730 &free_vmap_area_list, va, start, size); in pcpu_get_vm_areas()
4736 va = vas[area]; in pcpu_get_vm_areas()
4737 va->va_start = start; in pcpu_get_vm_areas()
4738 va->va_end = start + size; in pcpu_get_vm_areas()
4783 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4785 if (va) in pcpu_get_vm_areas()
4787 va->va_start, va->va_end); in pcpu_get_vm_areas()
4833 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4835 if (va) in pcpu_get_vm_areas()
4837 va->va_start, va->va_end); in pcpu_get_vm_areas()
4869 struct vmap_area *va; in vmalloc_dump_obj() local
4880 va = __find_vmap_area(addr, &vn->busy.root); in vmalloc_dump_obj()
4881 if (!va || !va->vm) { in vmalloc_dump_obj()
4886 vm = va->vm; in vmalloc_dump_obj()
4927 struct vmap_area *va; in show_purge_info() local
4934 list_for_each_entry(va, &vn->lazy.head, list) { in show_purge_info()
4936 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
4937 va_size(va)); in show_purge_info()
4946 struct vmap_area *va; in vmalloc_info_show() local
4954 list_for_each_entry(va, &vn->busy.head, list) { in vmalloc_info_show()
4955 if (!va->vm) { in vmalloc_info_show()
4956 if (va->flags & VMAP_RAM) in vmalloc_info_show()
4958 (void *)va->va_start, (void *)va->va_end, in vmalloc_info_show()
4959 va_size(va)); in vmalloc_info_show()
4964 v = va->vm; in vmalloc_info_show()
5155 struct vmap_area *va; in vmalloc_init() local
5185 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); in vmalloc_init()
5186 if (WARN_ON_ONCE(!va)) in vmalloc_init()
5189 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
5190 va->va_end = va->va_start + tmp->size; in vmalloc_init()
5191 va->vm = tmp; in vmalloc_init()
5193 vn = addr_to_node(va->va_start); in vmalloc_init()
5194 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in vmalloc_init()