Lines matching full:va
6 * The uvmm mutex protects any operations on the GPU VA space provided by the
7 * DRM GPU VA manager.
70 } va; member
174 u64 addr = reg->va.addr; in nouveau_uvma_region_sparse_unref()
175 u64 range = reg->va.range; in nouveau_uvma_region_sparse_unref()
183 u64 addr = uvma->va.va.addr; in nouveau_uvma_vmm_put()
184 u64 range = uvma->va.va.range; in nouveau_uvma_vmm_put()
193 u64 addr = uvma->va.va.addr; in nouveau_uvma_map()
194 u64 offset = uvma->va.gem.offset; in nouveau_uvma_map()
195 u64 range = uvma->va.va.range; in nouveau_uvma_map()
204 u64 addr = uvma->va.va.addr; in nouveau_uvma_unmap()
205 u64 range = uvma->va.va.range; in nouveau_uvma_unmap()
208 if (drm_gpuva_invalidated(&uvma->va)) in nouveau_uvma_unmap()
233 drm_gem_object_get(uvma->va.gem.obj); in nouveau_uvma_gem_get()
239 drm_gem_object_put(uvma->va.gem.obj); in nouveau_uvma_gem_put()
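The nested accessors above are easy to misread: reg->va.addr goes through the region's own addr/range pair, while uvma->va.va.addr first dereferences the struct drm_gpuva embedded in struct nouveau_uvma and then its address/range group, and uvma->va.gem.* reaches the backing GEM object the same way. A reduced sketch of the layout implied by these lines; only fields referenced in this listing are shown and their exact types are inferred:

struct drm_gpuva {			/* DRM GPU VA manager object */
	struct {
		u64 addr;		/* uvma->va.va.addr */
		u64 range;		/* uvma->va.va.range */
	} va;
	struct {
		u64 offset;		/* uvma->va.gem.offset */
		struct drm_gem_object *obj;	/* uvma->va.gem.obj */
	} gem;
	/* ... */
};

struct nouveau_uvma {
	struct drm_gpuva va;		/* embedded base object, hence the "va.va" chain */
	struct nouveau_uvma_region *region;	/* see uvma_from_va(va)->region below */
	u8 kind;			/* see uvma_from_va(va)->kind below */
};

struct nouveau_uvma_region {
	struct {
		u64 addr;		/* reg->va.addr */
		u64 range;		/* reg->va.range */
	} va;
	/* ... */
};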
279 u64 addr = reg->va.addr; in __nouveau_uvma_region_insert()
280 u64 range = reg->va.range; in __nouveau_uvma_region_insert()
308 reg->va.addr = addr; in nouveau_uvma_region_insert()
309 reg->va.range = range; in nouveau_uvma_region_insert()
322 MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0); in nouveau_uvma_region_remove()
377 if (reg->va.addr != addr || in nouveau_uvma_region_find()
378 reg->va.range != range) in nouveau_uvma_region_find()
390 reg->va.addr, in nouveau_uvma_region_empty()
391 reg->va.range); in nouveau_uvma_region_empty()
398 u64 addr = reg->va.addr; in __nouveau_uvma_region_destroy()
399 u64 range = reg->va.range; in __nouveau_uvma_region_destroy()
441 struct drm_gpuva *va = &uvma->va; in op_map_prepare_unwind() local
443 drm_gpuva_remove(va); in op_map_prepare_unwind()
448 op_unmap_prepare_unwind(struct drm_gpuva *va) in op_unmap_prepare_unwind() argument
450 drm_gpuva_insert(va->vm, va); in op_unmap_prepare_unwind()
472 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare_unwind() local
480 op_unmap_prepare_unwind(va); in nouveau_uvmm_sm_prepare_unwind()
484 op_unmap_prepare_unwind(op->unmap.va); in nouveau_uvmm_sm_prepare_unwind()
509 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare_unwind() local
510 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare_unwind()
511 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare_unwind()
527 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_prepare_unwind() local
528 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare_unwind()
529 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare_unwind()
596 drm_gpuva_map(&uvmm->base, &uvma->va, op); in op_map_prepare()
647 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare() local
649 .kind = uvma_from_va(va)->kind, in nouveau_uvmm_sm_prepare()
650 .region = uvma_from_va(va)->region, in nouveau_uvmm_sm_prepare()
652 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare()
653 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare()
688 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_prepare() local
689 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare()
690 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare()
709 op_unmap_prepare_unwind(va); in nouveau_uvmm_sm_prepare()
769 return op->remap.unmap->va->gem.obj; in op_gem_obj()
771 return op->unmap.va->gem.obj; in op_gem_obj()
781 struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj); in op_map()
789 struct drm_gpuva *va = u->va; in op_unmap() local
790 struct nouveau_uvma *uvma = uvma_from_va(va); in op_unmap()
801 struct nouveau_uvma *uvma = uvma_from_va(u->va); in op_unmap_range()
804 if (!drm_gpuva_invalidated(u->va)) in op_unmap_range()
813 struct nouveau_uvma *uvma = uvma_from_va(u->va); in op_remap()
814 u64 addr = uvma->va.va.addr; in op_remap()
815 u64 end = uvma->va.va.addr + uvma->va.va.range; in op_remap()
818 addr = r->prev->va.addr + r->prev->va.range; in op_remap()
821 end = r->next->va.addr; in op_remap()
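op_remap() trims the original mapping's extent down to the hole that actually has to be unmapped: whatever prev covers stays mapped at the low end, whatever next covers stays mapped at the high end. A sketch of that clamping, with the op_unmap_range() signature inferred from its hits above:

static void op_remap_sketch(struct drm_gpuva_op_remap *r,
			    struct nouveau_uvma *uvma)
{
	struct drm_gpuva_op_unmap *u = r->unmap;
	u64 addr = uvma->va.va.addr;
	u64 end  = addr + uvma->va.va.range;

	if (r->prev)	/* low part survives: start unmapping after it */
		addr = r->prev->va.addr + r->prev->va.range;
	if (r->next)	/* high part survives: stop where it begins */
		end = r->next->va.addr;

	op_unmap_range(u, addr, end - addr);	/* unmap only the hole */
}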
883 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_cleanup() local
884 struct nouveau_uvma *uvma = uvma_from_va(va); in nouveau_uvmm_sm_cleanup()
887 u64 addr = va->va.addr; in nouveau_uvmm_sm_cleanup()
888 u64 end = addr + va->va.range; in nouveau_uvmm_sm_cleanup()
891 addr = p->va.addr + p->va.range; in nouveau_uvmm_sm_cleanup()
894 end = n->va.addr; in nouveau_uvmm_sm_cleanup()
905 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_cleanup() local
906 struct nouveau_uvma *uvma = uvma_from_va(va); in nouveau_uvmm_sm_cleanup()
1006 if (op->va.range > (obj->size - op->gem.offset)) in bind_validate_op()
1010 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range); in bind_validate_op()
1028 u64 op_addr = op->va.addr; in bind_validate_map_sparse()
1029 u64 op_end = op_addr + op->va.range; in bind_validate_map_sparse()
1076 reg_addr = reg->va.addr; in bind_validate_map_common()
1077 reg_end = reg_addr + reg->va.range; in bind_validate_map_common()
1096 u64 op_addr = op->va.addr; in bind_validate_region()
1097 u64 op_range = op->va.range; in bind_validate_region()
1130 drm_gpuva_link(&new->map->va, vm_bo); in bind_link_gpuvas()
1133 struct drm_gpuva *va = op->remap.unmap->va; in bind_link_gpuvas() local
1136 drm_gpuva_link(&new->prev->va, va->vm_bo); in bind_link_gpuvas()
1138 drm_gpuva_link(&new->next->va, va->vm_bo); in bind_link_gpuvas()
1139 drm_gpuva_unlink(va); in bind_link_gpuvas()
1143 drm_gpuva_unlink(op->unmap.va); in bind_link_gpuvas()
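The bind_link_gpuvas() hits suggest a per-op switch: a MAP links the freshly allocated VA to the BO's vm_bo, a REMAP links the preserved prev/next pieces and unlinks the VA being split, and an UNMAP simply unlinks. A sketch of that shape, assuming the standard drm_gpuva_for_each_op() iterator and DRM_GPUVA_OP_* types; names not present in the listing (the prealloc container, the *_sketch helper) are inferred for illustration:

static void bind_link_gpuvas_sketch(struct drm_gpuva_ops *ops,
				    struct nouveau_uvma_prealloc *new,
				    struct drm_gpuvm_bo *vm_bo)
{
	struct drm_gpuva_op *op;

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			drm_gpuva_link(&new->map->va, vm_bo);
			break;
		case DRM_GPUVA_OP_REMAP: {
			struct drm_gpuva *va = op->remap.unmap->va;

			if (op->remap.prev)
				drm_gpuva_link(&new->prev->va, va->vm_bo);
			if (op->remap.next)
				drm_gpuva_link(&new->next->va, va->vm_bo);
			drm_gpuva_unlink(va);	/* the original VA is replaced */
			break;
		}
		case DRM_GPUVA_OP_UNMAP:
			drm_gpuva_unlink(op->unmap.va);
			break;
		default:
			break;
		}
	}
}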
1232 /* Once we start modifying the GPU VA space we need to keep holding the in nouveau_uvmm_bind_job_submit()
1234 * VA space changes must appear atomically and we need to be able to in nouveau_uvmm_bind_job_submit()
1235 * unwind all GPU VA space changes on failure. in nouveau_uvmm_bind_job_submit()
1243 op->va.addr, in nouveau_uvmm_bind_job_submit()
1244 op->va.range); in nouveau_uvmm_bind_job_submit()
1250 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1251 op->va.range); in nouveau_uvmm_bind_job_submit()
1258 op->va.addr, in nouveau_uvmm_bind_job_submit()
1259 op->va.range); in nouveau_uvmm_bind_job_submit()
1281 op->va.addr, in nouveau_uvmm_bind_job_submit()
1282 op->va.range); in nouveau_uvmm_bind_job_submit()
1284 u64 reg_addr = reg->va.addr; in nouveau_uvmm_bind_job_submit()
1285 u64 reg_end = reg_addr + reg->va.range; in nouveau_uvmm_bind_job_submit()
1286 u64 op_addr = op->va.addr; in nouveau_uvmm_bind_job_submit()
1287 u64 op_end = op_addr + op->va.range; in nouveau_uvmm_bind_job_submit()
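The four bounds computed at lines 1284-1287 feed an enclosure test: when a map request overlaps an existing sparse region, it presumably has to sit entirely inside that region. A sketch of such a check as a hypothetical helper; the name and any error handling are illustrative, not taken from the source:

/* Hypothetical helper: true if the requested op range lies fully inside
 * the region; callers would reject partially overlapping requests.
 */
static bool region_encloses_op(u64 reg_addr, u64 reg_end,
			       u64 op_addr, u64 op_end)
{
	return reg_addr <= op_addr && op_end <= reg_end;
}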
1304 op->va.addr, in nouveau_uvmm_bind_job_submit()
1305 op->va.range, in nouveau_uvmm_bind_job_submit()
1315 op->va.addr, in nouveau_uvmm_bind_job_submit()
1316 op->va.range, in nouveau_uvmm_bind_job_submit()
1328 op->va.addr, in nouveau_uvmm_bind_job_submit()
1329 op->va.range); in nouveau_uvmm_bind_job_submit()
1401 nouveau_uvma_region_destroy(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1402 op->va.range); in nouveau_uvmm_bind_job_submit()
1412 op->va.addr, in nouveau_uvmm_bind_job_submit()
1413 op->va.range); in nouveau_uvmm_bind_job_submit()
1569 op->va.addr = uop->addr; in bind_job_op_from_uop()
1570 op->va.range = uop->range; in bind_job_op_from_uop()
1760 struct drm_gpuva *va; in nouveau_uvmm_bo_map_all() local
1765 drm_gpuvm_bo_for_each_va(va, vm_bo) { in nouveau_uvmm_bo_map_all()
1766 struct nouveau_uvma *uvma = uvma_from_va(va); in nouveau_uvmm_bo_map_all()
1769 drm_gpuva_invalidate(va, false); in nouveau_uvmm_bo_map_all()
1779 struct drm_gpuva *va; in nouveau_uvmm_bo_unmap_all() local
1784 drm_gpuvm_bo_for_each_va(va, vm_bo) { in nouveau_uvmm_bo_unmap_all()
1785 struct nouveau_uvma *uvma = uvma_from_va(va); in nouveau_uvmm_bo_unmap_all()
1788 drm_gpuva_invalidate(va, true); in nouveau_uvmm_bo_unmap_all()
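nouveau_uvmm_bo_map_all() and nouveau_uvmm_bo_unmap_all() mirror each other: both walk every GPU VA bound to the BO through its vm_bo, but unmapping flags each VA as invalidated (true) while mapping clears the flag again (false). A sketch of the unmap side, with the single-argument form of nouveau_uvma_unmap() inferred from the lines above:

static void bo_unmap_all_sketch(struct drm_gpuvm_bo *vm_bo)
{
	struct drm_gpuva *va;

	drm_gpuvm_bo_for_each_va(va, vm_bo) {
		struct nouveau_uvma *uvma = uvma_from_va(va);

		nouveau_uvma_unmap(uvma);	/* tear down the GPU mapping */
		drm_gpuva_invalidate(va, true);	/* mark it as needing re-map */
	}
}

The map side would do the inverse per VA, re-establishing the mapping for each uvma and then calling drm_gpuva_invalidate(va, false).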
1896 struct drm_gpuva *va, *next; in nouveau_uvmm_fini() local
1899 drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { in nouveau_uvmm_fini()
1900 struct nouveau_uvma *uvma = uvma_from_va(va); in nouveau_uvmm_fini()
1901 struct drm_gem_object *obj = va->gem.obj; in nouveau_uvmm_fini()
1903 if (unlikely(va == &uvmm->base.kernel_alloc_node)) in nouveau_uvmm_fini()
1906 drm_gpuva_remove(va); in nouveau_uvmm_fini()
1909 drm_gpuva_unlink(va); in nouveau_uvmm_fini()
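The nouveau_uvmm_fini() hits outline the teardown loop: walk all VAs with the _safe iterator (entries are removed while iterating), skip the VA manager's kernel reserved node, then remove each VA from the tree and unlink it from its GEM object before the per-uvma cleanup. A hedged sketch of that shape; the cleanup order and any locking around the unlink are assumptions:

static void uvmm_fini_sketch(struct nouveau_uvmm *uvmm)
{
	struct drm_gpuva *va, *next;

	drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
		struct nouveau_uvma *uvma = uvma_from_va(va);

		/* the VA manager's own reserved node is not a driver mapping */
		if (va == &uvmm->base.kernel_alloc_node)
			continue;

		drm_gpuva_remove(va);	/* drop from the VA tree */
		drm_gpuva_unlink(va);	/* detach from va->gem.obj */

		/* remaining cleanup uses the helpers named earlier in this
		 * listing; the exact order is an assumption */
		nouveau_uvma_unmap(uvma);
		nouveau_uvma_vmm_put(uvma);
		nouveau_uvma_gem_put(uvma);
	}
}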