Lines Matching full:va
36 * The DRM GPU VA Manager, represented by struct drm_gpuvm keeps track of a
37 * GPU's virtual address (VA) space and manages the corresponding virtual
42 * all existing GPU VA mappings using this &drm_gem_object as a backing buffer.
47 * The GPU VA manager internally uses an rb-tree to manage the
51 * portion of VA space reserved by the kernel. This node is initialized together
52 * with the GPU VA manager instance and removed when the GPU VA manager is
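To make the structure described above concrete, here is a minimal sketch of walking the mappings tracked by a &drm_gpuvm. The drm_gpuvm_for_each_va() iterator comes from the drm_gpuvm header; driver_dump_vm() is only an illustrative name.

static void driver_dump_vm(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuva *va;

	/* Walk every mapping in the VA space, ordered by start address. */
	drm_gpuvm_for_each_va(va, gpuvm)
		pr_info("va: addr=0x%llx range=0x%llx offset=0x%llx\n",
			va->va.addr, va->va.range, va->gem.offset);
}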
105 * Besides its capability to manage and represent a GPU VA space, the
106 * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
109 * Therefore, the DRM GPU VA manager provides an algorithm implementing splitting
110 * and merging of existing GPU VA mappings with the ones that are requested to
119 * of the GPU VA space.
121 * Depending on how the new GPU VA mapping intersects with the existing mappings
122 * of the GPU VA space, the &drm_gpuvm_ops callbacks contain an arbitrary number
146 * call back into the driver in order to unmap a range of GPU VA space. The
163 * To update the &drm_gpuvm's view of the GPU VA space, drm_gpuva_insert() and
170 * The following diagram depicts the basic relationships of existing GPU VA
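A hedged sketch of how a driver might consume this split and merge logic through the ops-list interface. drm_gpuvm_sm_map_ops_create(), drm_gpuva_for_each_op() and drm_gpuva_ops_free() are taken from the drm_gpuvm API, but the exact argument order should be verified against the header of the kernel in use; the comments only outline what a driver would do per step.

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, req_addr, req_range,
					  req_obj, req_offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* Create the new mapping described by op->map. */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* Split op->remap.unmap->va into op->remap.prev/next. */
			break;
		case DRM_GPUVA_OP_UNMAP:
			/* Tear down op->unmap.va entirely. */
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return ret;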
432 * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
438 * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
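A minimal sketch of honouring that locking rule, assuming the driver protects the &drm_gem_object's GPU VA list with the object's dma-resv lock (a driver-specific lock works as well):

	int ret;

	/* The GEM object's GPU VA list is modified by drm_gpuva_link() below. */
	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;

	drm_gpuva_link(va, vm_bo);
	dma_resv_unlock(obj->resv);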
503 * struct drm_gpuva *va;
507 * va = driver_gpuva_alloc();
508 * if (!va)
509 * ; // unwind previous VA space updates,
513 * drm_gpuva_map(gpuvm, va, &op->map);
514 * drm_gpuva_link(va, vm_bo);
520 * va = op->remap.unmap->va;
525 * ; // unwind previous VA space
533 * ; // unwind previous VA space
542 * drm_gpuva_link(prev, va->vm_bo);
544 * drm_gpuva_link(next, va->vm_bo);
545 * drm_gpuva_unlink(va);
550 * va = op->unmap->va;
553 * drm_gpuva_unlink(va);
643 * struct drm_gpuva *va = op->remap.unmap->va;
648 * drm_gpuva_link(ctx->prev_va, va->vm_bo);
653 * drm_gpuva_link(ctx->next_va, va->vm_bo);
657 * drm_gpuva_unlink(va);
658 * kfree(va);
665 * drm_gpuva_unlink(op->unmap.va);
667 * kfree(op->unmap.va);
870 #define GPUVA_START(node) ((node)->va.addr)
871 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
881 struct drm_gpuva *va);
882 static void __drm_gpuva_remove(struct drm_gpuva *va);
913 u64 kstart = gpuvm->kernel_alloc_node.va.addr; in drm_gpuvm_in_kernel_node()
914 u64 krange = gpuvm->kernel_alloc_node.va.range; in drm_gpuvm_in_kernel_node()
981 * @name: the name of the GPU VA space
985 * @start_offset: the start offset of the GPU VA space
986 * @range: the size of the GPU VA space
987 * @reserve_offset: the start of the kernel reserved GPU VA area
988 * @reserve_range: the size of the kernel reserved GPU VA area
1030 gpuvm->kernel_alloc_node.va.addr = reserve_offset; in drm_gpuvm_init()
1031 gpuvm->kernel_alloc_node.va.range = reserve_range; in drm_gpuvm_init()
1045 if (gpuvm->kernel_alloc_node.va.range) in drm_gpuvm_fini()
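A hedged initialization sketch matching the parameters documented above. Recent kernels take additional arguments (the flags, the &drm_device, a resv GEM object and the &drm_gpuvm_ops table); the argument order shown here follows the current header but should be verified, and driver_gpuvm_ops is a placeholder.

	/*
	 * Manage a 48-bit VA space and reserve its first 4 KiB for the kernel;
	 * the reserved node is inserted by drm_gpuvm_init() and removed again
	 * by drm_gpuvm_fini().
	 */
	drm_gpuvm_init(gpuvm, "example-vm", 0, drm, r_obj,
		       0, 1ull << 48,	/* start_offset, range */
		       0, SZ_4K,	/* reserve_offset, reserve_range */
		       &driver_gpuvm_ops);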
1201 * @addr: the start address within the VA space
1202 * @range: the range to iterate within the VA space
1215 struct drm_gpuva *va; in drm_gpuvm_prepare_range() local
1219 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { in drm_gpuvm_prepare_range()
1220 struct drm_gem_object *obj = va->gem.obj; in drm_gpuvm_prepare_range()
1328 * @addr: the start address within the VA space
1329 * @range: the range to iterate within the VA space
1718 struct drm_gpuva *va) in __drm_gpuva_insert() argument
1724 GPUVA_START(va), in __drm_gpuva_insert()
1725 GPUVA_LAST(va))) in __drm_gpuva_insert()
1728 va->vm = gpuvm; in __drm_gpuva_insert()
1730 drm_gpuva_it_insert(va, &gpuvm->rb.tree); in __drm_gpuva_insert()
1732 node = rb_prev(&va->rb.node); in __drm_gpuva_insert()
1738 list_add(&va->rb.entry, head); in __drm_gpuva_insert()
1746 * @va: the &drm_gpuva to insert
1752 * VA space, such as drm_gpuvm_for_each_va_safe() and
1759 struct drm_gpuva *va) in drm_gpuva_insert() argument
1761 u64 addr = va->va.addr; in drm_gpuva_insert()
1762 u64 range = va->va.range; in drm_gpuva_insert()
1768 ret = __drm_gpuva_insert(gpuvm, va); in drm_gpuva_insert()
1782 __drm_gpuva_remove(struct drm_gpuva *va) in __drm_gpuva_remove() argument
1784 drm_gpuva_it_remove(va, &va->vm->rb.tree); in __drm_gpuva_remove()
1785 list_del_init(&va->rb.entry); in __drm_gpuva_remove()
1790 * @va: the &drm_gpuva to remove
1792 * This removes the given &va from the underlying tree.
1795 * VA space, such as drm_gpuvm_for_each_va_safe() and
1799 drm_gpuva_remove(struct drm_gpuva *va) in drm_gpuva_remove() argument
1801 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_remove()
1803 if (unlikely(va == &gpuvm->kernel_alloc_node)) { in drm_gpuva_remove()
1809 __drm_gpuva_remove(va); in drm_gpuva_remove()
1810 drm_gpuvm_put(va->vm); in drm_gpuva_remove()
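A hedged sketch of inserting a mapping directly with drm_gpuva_insert(); addr, range, obj and offset are assumed to come from the driver's bind request, and the allocation strategy is up to the driver.

	struct drm_gpuva *va;
	int ret;

	va = kzalloc(sizeof(*va), GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;

	/* Fails if the range is occupied or outside the managed VA space. */
	ret = drm_gpuva_insert(gpuvm, va);
	if (ret) {
		kfree(va);
		return ret;
	}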
1816 * @va: the &drm_gpuva to link
1819 * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
1830 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) in drm_gpuva_link() argument
1832 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_link()
1833 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_link()
1840 va->vm_bo = drm_gpuvm_bo_get(vm_bo); in drm_gpuva_link()
1843 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); in drm_gpuva_link()
1849 * @va: the &drm_gpuva to unlink
1851 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
1854 * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
1866 drm_gpuva_unlink(struct drm_gpuva *va) in drm_gpuva_unlink() argument
1868 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_unlink()
1869 struct drm_gpuvm_bo *vm_bo = va->vm_bo; in drm_gpuva_unlink()
1875 list_del_init(&va->gem.entry); in drm_gpuva_unlink()
1877 va->vm_bo = NULL; in drm_gpuva_unlink()
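A hedged sketch of the usual link/unlink pairing around a &drm_gpuvm_bo. drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put() are taken from the drm_gpuvm API, and the GEM object's GPU VA list lock is assumed to be held as required above.

	struct drm_gpuvm_bo *vm_bo;

	/* Look up or create the combination of this gpuvm and GEM object. */
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	drm_gpuva_link(va, vm_bo);

	/* drm_gpuva_link() took its own reference; drop the obtain reference. */
	drm_gpuvm_bo_put(vm_bo);

	/* Later, on unmap, drop the reference taken by drm_gpuva_link(). */
	drm_gpuva_unlink(va);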
1912 struct drm_gpuva *va; in drm_gpuva_find() local
1914 va = drm_gpuva_find_first(gpuvm, addr, range); in drm_gpuva_find()
1915 if (!va) in drm_gpuva_find()
1918 if (va->va.addr != addr || in drm_gpuva_find()
1919 va->va.range != range) in drm_gpuva_find()
1922 return va; in drm_gpuva_find()
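A short lookup sketch; drm_gpuva_find() only returns a mapping whose address and range match the request exactly, otherwise NULL.

	struct drm_gpuva *va;

	va = drm_gpuva_find(gpuvm, addr, range);
	if (!va)
		return -ENOENT;	/* no exact match for this addr/range */

	drm_gpuva_remove(va);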
1932 * @start: the given GPU VA's start address
1934 * Find the adjacent &drm_gpuva before the GPU VA with the given &start address.
1936 * Note that if there is any free space between the GPU VA mappings no mapping
1954 * @end: the given GPU VA's end address
1956 * Find the adjacent &drm_gpuva after the GPU VA with the given &end address.
1958 * Note that if there is any free space between the GPU VA mappings no mapping
1974 * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
1993 * @va: the &drm_gpuva to insert
1994 * @op: the &drm_gpuva_op_map to initialize @va with
1996 * Initializes the @va from the @op and inserts it into the given @gpuvm.
2000 struct drm_gpuva *va, in drm_gpuva_map() argument
2003 drm_gpuva_init_from_op(va, op); in drm_gpuva_map()
2004 drm_gpuva_insert(gpuvm, va); in drm_gpuva_map()
2023 struct drm_gpuva *va = op->unmap->va; in drm_gpuva_remap() local
2024 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_remap()
2026 drm_gpuva_remove(va); in drm_gpuva_remap()
2050 drm_gpuva_remove(op->va); in drm_gpuva_unmap()
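These helpers are intended to be called from within the &drm_gpuvm_ops step callbacks. A hedged sketch of a map step follows; the sm_step_map member name follows the upstream struct drm_gpuvm_ops, and struct driver_ctx is a hypothetical container for the priv pointer.

	struct driver_ctx {
		struct drm_gpuvm *gpuvm;
		struct drm_gpuvm_bo *vm_bo;
	};

	static int driver_gpuvm_sm_step_map(struct drm_gpuva_op *op, void *priv)
	{
		struct driver_ctx *ctx = priv;
		struct drm_gpuva *va;

		va = kzalloc(sizeof(*va), GFP_KERNEL);
		if (!va)
			return -ENOMEM;

		/* Initialize va from op->map and insert it into the VA tree. */
		drm_gpuva_map(ctx->gpuvm, va, &op->map);

		/* Caller is expected to hold the GEM object's GPU VA list lock. */
		drm_gpuva_link(va, ctx->vm_bo);

		return 0;
	}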
2062 op.map.va.addr = addr; in op_map_cb()
2063 op.map.va.range = range; in op_map_cb()
2090 struct drm_gpuva *va, bool merge) in op_unmap_cb() argument
2095 op.unmap.va = va; in op_unmap_cb()
2107 struct drm_gpuva *va, *next; in __drm_gpuvm_sm_map() local
2114 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { in __drm_gpuvm_sm_map()
2115 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuvm_sm_map()
2116 u64 offset = va->gem.offset; in __drm_gpuvm_sm_map()
2117 u64 addr = va->va.addr; in __drm_gpuvm_sm_map()
2118 u64 range = va->va.range; in __drm_gpuvm_sm_map()
2120 bool merge = !!va->gem.obj; in __drm_gpuvm_sm_map()
2127 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuvm_sm_map()
2134 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuvm_sm_map()
2142 .va.addr = req_end, in __drm_gpuvm_sm_map()
2143 .va.range = range - req_range, in __drm_gpuvm_sm_map()
2148 .va = va, in __drm_gpuvm_sm_map()
2160 .va.addr = addr, in __drm_gpuvm_sm_map()
2161 .va.range = ls_range, in __drm_gpuvm_sm_map()
2165 struct drm_gpuva_op_unmap u = { .va = va }; in __drm_gpuvm_sm_map()
2187 .va.addr = req_end, in __drm_gpuvm_sm_map()
2188 .va.range = end - req_end, in __drm_gpuvm_sm_map()
2205 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuvm_sm_map()
2212 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuvm_sm_map()
2220 .va.addr = req_end, in __drm_gpuvm_sm_map()
2221 .va.range = end - req_end, in __drm_gpuvm_sm_map()
2226 .va = va, in __drm_gpuvm_sm_map()
2248 struct drm_gpuva *va, *next; in __drm_gpuvm_sm_unmap() local
2255 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { in __drm_gpuvm_sm_unmap()
2258 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuvm_sm_unmap()
2259 u64 offset = va->gem.offset; in __drm_gpuvm_sm_unmap()
2260 u64 addr = va->va.addr; in __drm_gpuvm_sm_unmap()
2261 u64 range = va->va.range; in __drm_gpuvm_sm_unmap()
2265 prev.va.addr = addr; in __drm_gpuvm_sm_unmap()
2266 prev.va.range = req_addr - addr; in __drm_gpuvm_sm_unmap()
2274 next.va.addr = req_end; in __drm_gpuvm_sm_unmap()
2275 next.va.range = end - req_end; in __drm_gpuvm_sm_unmap()
2283 struct drm_gpuva_op_unmap unmap = { .va = va }; in __drm_gpuvm_sm_unmap()
2292 ret = op_unmap_cb(ops, priv, va, false); in __drm_gpuvm_sm_unmap()
2303 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2310 * This function iterates the given range of the GPU VA space. It utilizes the
2314 * Drivers may use these callbacks to update the GPU VA space right away within
2317 * be called before the &drm_gpuvm's view of the GPU VA space was
2319 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2353 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2358 * This function iterates the given range of the GPU VA space. It utilizes the
2362 * Drivers may use these callbacks to update the GPU VA space right away within
2365 * called before the &drm_gpuvm's view of the GPU VA space was updated
2367 * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2486 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2505 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2507 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2554 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2569 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2571 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
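A hedged sketch of wiring the step callbacks together and applying a map request in one call. The sm_step_remap/unmap handlers are assumed to be defined analogously to the map step shown earlier, and the drm_gpuvm_sm_map() argument order should be verified against the kernel headers in use.

	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
		.sm_step_map = driver_gpuvm_sm_step_map,
		.sm_step_remap = driver_gpuvm_sm_step_remap,
		.sm_step_unmap = driver_gpuvm_sm_step_unmap,
	};

	struct driver_ctx ctx = { .gpuvm = gpuvm, .vm_bo = vm_bo };
	int ret;

	/* Each callback updates the drm_gpuvm's VA view immediately. */
	ret = drm_gpuvm_sm_map(gpuvm, &ctx, req_addr, req_range,
			       req_obj, req_offset);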
2615 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2637 struct drm_gpuva *va; in drm_gpuvm_prefetch_ops_create() local
2647 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { in drm_gpuvm_prefetch_ops_create()
2655 op->prefetch.va = va; in drm_gpuvm_prefetch_ops_create()
2690 struct drm_gpuva *va; in drm_gpuvm_bo_unmap_ops_create() local
2701 drm_gpuvm_bo_for_each_va(va, vm_bo) { in drm_gpuvm_bo_unmap_ops_create()
2709 op->unmap.va = va; in drm_gpuvm_bo_unmap_ops_create()
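A hedged sketch of tearing down every mapping backed by a given GEM object through drm_gpuvm_bo_unmap_ops_create(); driver_unmap_range() is a placeholder for the driver's page-table update, and the GEM object's GPU VA list lock is assumed to be held.

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	/* Every mapping of this gpuvm/obj combination becomes an unmap op. */
	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops)
		driver_unmap_range(op->unmap.va->va.addr,
				   op->unmap.va->va.range);

	drm_gpuva_ops_free(vm_bo->vm, ops);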