Lines Matching +full:software +full:- +full:locked

7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
54 * radeon_vm_num_pdes - return the number of page directory entries
62 return rdev->vm_manager.max_pfn >> radeon_vm_block_size; in radeon_vm_num_pdes()
66 * radeon_vm_directory_size - returns the size of the page directory in bytes
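
Only the return expression of radeon_vm_num_pdes() is visible above, and radeon_vm_directory_size() shows just its kernel-doc line. A minimal standalone sketch of the arithmetic they suggest, assuming 8-byte page directory entries and padding to an assumed 4 KiB GPU page (neither detail is in the matched lines):

#include <stdint.h>

#define GPU_PAGE_SIZE 4096ULL   /* assumed RADEON_GPU_PAGE_SIZE */

/* number of page directory entries: one PDE covers 2^block_size pages */
static inline uint64_t vm_num_pdes(uint64_t max_pfn, unsigned block_size)
{
        return max_pfn >> block_size;
}

/* directory size in bytes: 8 bytes per PDE, rounded up to a GPU page (assumed) */
static inline uint64_t vm_directory_size(uint64_t max_pfn, unsigned block_size)
{
        uint64_t bytes = vm_num_pdes(max_pfn, block_size) * 8;

        return (bytes + GPU_PAGE_SIZE - 1) & ~(GPU_PAGE_SIZE - 1);
}
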
78 * radeon_vm_manager_init - init the vm manager
89 if (!rdev->vm_manager.enabled) { in radeon_vm_manager_init()
94 rdev->vm_manager.enabled = true; in radeon_vm_manager_init()
100 * radeon_vm_manager_fini - tear down the vm manager
110 if (!rdev->vm_manager.enabled) in radeon_vm_manager_fini()
114 radeon_fence_unref(&rdev->vm_manager.active[i]); in radeon_vm_manager_fini()
116 rdev->vm_manager.enabled = false; in radeon_vm_manager_fini()
120 * radeon_vm_get_bos - add the vm BOs to a validation list
136 list = kvmalloc_array(vm->max_pde_used + 2, in radeon_vm_get_bos()
142 list[0].robj = vm->page_directory; in radeon_vm_get_bos()
145 list[0].tv.bo = &vm->page_directory->tbo; in radeon_vm_get_bos()
150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { in radeon_vm_get_bos()
151 if (!vm->page_tables[i].bo) in radeon_vm_get_bos()
154 list[idx].robj = vm->page_tables[i].bo; in radeon_vm_get_bos()
157 list[idx].tv.bo = &list[idx].robj->tbo; in radeon_vm_get_bos()
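
The matched lines of radeon_vm_get_bos() outline the validation list: slot 0 holds the page directory, the remaining slots hold each allocated page table, and tables that were never allocated are skipped. A simplified userspace sketch of that layout; struct val_entry and struct my_vm are hypothetical stand-ins for the driver's radeon_bo_list and radeon_vm:

#include <stdlib.h>

struct val_entry { void *robj; };       /* hypothetical stand-in for radeon_bo_list */
struct my_vm {                          /* hypothetical stand-in for radeon_vm */
        void *page_directory;
        void **page_tables;
        unsigned max_pde_used;
};

static struct val_entry *build_list(struct my_vm *vm, unsigned *count)
{
        /* max_pde_used + 2: the directory plus every possible table index */
        struct val_entry *list = calloc(vm->max_pde_used + 2, sizeof(*list));
        unsigned i, idx = 1;

        if (!list)
                return NULL;
        list[0].robj = vm->page_directory;      /* slot 0: page directory */
        for (i = 0; i <= vm->max_pde_used; i++) {
                if (!vm->page_tables[i])        /* skip tables not yet allocated */
                        continue;
                list[idx++].robj = vm->page_tables[i];
        }
        *count = idx;
        return list;
}
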
167 * radeon_vm_grab_id - allocate the next free VMID
176 * Global and local mutex must be locked!
182 struct radeon_vm_id *vm_id = &vm->ids[ring]; in radeon_vm_grab_id()
188 if (vm_id->id && vm_id->last_id_use && in radeon_vm_grab_id()
189 vm_id->last_id_use == rdev->vm_manager.active[vm_id->id]) in radeon_vm_grab_id()
193 vm_id->pd_gpu_addr = ~0ll; in radeon_vm_grab_id()
196 for (i = 1; i < rdev->vm_manager.nvm; ++i) { in radeon_vm_grab_id()
197 struct radeon_fence *fence = rdev->vm_manager.active[i]; in radeon_vm_grab_id()
201 vm_id->id = i; in radeon_vm_grab_id()
206 if (radeon_fence_is_earlier(fence, best[fence->ring])) { in radeon_vm_grab_id()
207 best[fence->ring] = fence; in radeon_vm_grab_id()
208 choices[fence->ring == ring ? 0 : 1] = i; in radeon_vm_grab_id()
214 vm_id->id = choices[i]; in radeon_vm_grab_id()
216 return rdev->vm_manager.active[choices[i]]; in radeon_vm_grab_id()
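
Pieced together, radeon_vm_grab_id() appears to (1) keep the current VMID if its last-use fence is still the manager's active fence for that id, (2) otherwise take any id with no active fence, and (3) otherwise steal the id whose fence is oldest. A condensed sketch under those assumptions; the fence comparison is reduced to a plain sequence number, and the original's preference for fences on the same ring (choices[0] vs choices[1]) is omitted:

#include <stdint.h>
#include <stddef.h>

struct fence { uint64_t seq; unsigned ring; };  /* hypothetical fence stand-in */

/* Pick a VMID: reuse the current one, take a free one, or steal the oldest. */
static unsigned grab_id(struct fence *active[], unsigned nvm,
                        unsigned cur_id, struct fence *last_use)
{
        unsigned i, best = 0;
        uint64_t best_seq = UINT64_MAX;

        if (cur_id && last_use && last_use == active[cur_id])
                return cur_id;                  /* id still ours, nothing to do */

        for (i = 1; i < nvm; ++i) {             /* the fragments start at id 1 */
                if (!active[i])
                        return i;               /* completely unused id */
                if (active[i]->seq < best_seq) {/* remember the oldest fence */
                        best_seq = active[i]->seq;
                        best = i;
                }
        }
        return best;                            /* caller must wait on active[best] */
}
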
226 * radeon_vm_flush - hardware flush the vm
235 * Global and local mutex must be locked!
241 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); in radeon_vm_flush()
242 struct radeon_vm_id *vm_id = &vm->ids[ring]; in radeon_vm_flush()
244 if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || in radeon_vm_flush()
245 radeon_fence_is_earlier(vm_id->flushed_updates, updates)) { in radeon_vm_flush()
247 trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id); in radeon_vm_flush()
248 radeon_fence_unref(&vm_id->flushed_updates); in radeon_vm_flush()
249 vm_id->flushed_updates = radeon_fence_ref(updates); in radeon_vm_flush()
250 vm_id->pd_gpu_addr = pd_addr; in radeon_vm_flush()
251 radeon_ring_vm_flush(rdev, &rdev->ring[ring], in radeon_vm_flush()
252 vm_id->id, vm_id->pd_gpu_addr); in radeon_vm_flush()
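
radeon_vm_flush() only emits a hardware flush when the page-directory address changed or when page-table updates newer than the last flushed ones exist. A compact sketch of that decision, again with fence ordering reduced to sequence numbers (an assumption, not the driver's radeon_fence_is_earlier()):

#include <stdint.h>
#include <stdbool.h>

struct upd { uint64_t seq; };           /* hypothetical fence stand-in */

static bool need_vm_flush(uint64_t pd_addr, uint64_t flushed_pd,
                          const struct upd *flushed, const struct upd *updates)
{
        if (pd_addr != flushed_pd)              /* directory moved */
                return true;
        if (!flushed)                           /* nothing flushed yet */
                return true;
        return updates && flushed->seq < updates->seq;  /* newer updates pending */
}
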
258 * radeon_vm_fence - remember fence for vm
267 * Global and local mutex must be locked!
273 unsigned vm_id = vm->ids[fence->ring].id; in radeon_vm_fence()
275 radeon_fence_unref(&rdev->vm_manager.active[vm_id]); in radeon_vm_fence()
276 rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence); in radeon_vm_fence()
278 radeon_fence_unref(&vm->ids[fence->ring].last_id_use); in radeon_vm_fence()
279 vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence); in radeon_vm_fence()
283 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
299 list_for_each_entry(bo_va, &bo->va, bo_list) { in radeon_vm_bo_find()
300 if (bo_va->vm == vm) in radeon_vm_bo_find()
308 * radeon_vm_bo_add - add a bo to a specific vm
330 bo_va->vm = vm; in radeon_vm_bo_add()
331 bo_va->bo = bo; in radeon_vm_bo_add()
332 bo_va->it.start = 0; in radeon_vm_bo_add()
333 bo_va->it.last = 0; in radeon_vm_bo_add()
334 bo_va->flags = 0; in radeon_vm_bo_add()
335 bo_va->ref_count = 1; in radeon_vm_bo_add()
336 INIT_LIST_HEAD(&bo_va->bo_list); in radeon_vm_bo_add()
337 INIT_LIST_HEAD(&bo_va->vm_status); in radeon_vm_bo_add()
339 mutex_lock(&vm->mutex); in radeon_vm_bo_add()
340 list_add_tail(&bo_va->bo_list, &bo->va); in radeon_vm_bo_add()
341 mutex_unlock(&vm->mutex); in radeon_vm_bo_add()
347 * radeon_vm_set_pages - helper to call the right asic function
369 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; in radeon_vm_set_pages()
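
The single matched line of radeon_vm_set_pages() implies the helper can source PTE contents directly from the GART table, which holds one 8-byte entry per page; the >> 12 suggests 4 KiB pages, which is assumed below:

#include <stdint.h>

/* Byte address of the GART entry describing GPU address 'addr' (8 bytes/entry, 4 KiB pages). */
static inline uint64_t gart_entry_addr(uint64_t table_addr, uint64_t addr)
{
        return table_addr + (addr >> 12) * 8;
}
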
383 * radeon_vm_clear_bo - initially clear the page dir/table
401 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in radeon_vm_clear_bo()
422 ib.fence->is_vm_update = true; in radeon_vm_clear_bo()
434 * radeon_vm_bo_set_addr - set bos virtual address inside a vm
452 uint64_t size = radeon_bo_size(bo_va->bo); in radeon_vm_bo_set_addr()
453 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_set_addr()
460 eoffset = soffset + size - 1; in radeon_vm_bo_set_addr()
462 r = -EINVAL; in radeon_vm_bo_set_addr()
467 if (last_pfn >= rdev->vm_manager.max_pfn) { in radeon_vm_bo_set_addr()
468 dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n", in radeon_vm_bo_set_addr()
469 last_pfn, rdev->vm_manager.max_pfn); in radeon_vm_bo_set_addr()
470 r = -EINVAL; in radeon_vm_bo_set_addr()
478 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
483 it = interval_tree_iter_first(&vm->va, soffset, eoffset); in radeon_vm_bo_set_addr()
484 if (it && it != &bo_va->it) { in radeon_vm_bo_set_addr()
488 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " in radeon_vm_bo_set_addr()
489 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, in radeon_vm_bo_set_addr()
490 soffset, tmp->bo, tmp->it.start, tmp->it.last); in radeon_vm_bo_set_addr()
491 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
492 r = -EINVAL; in radeon_vm_bo_set_addr()
497 if (bo_va->it.start || bo_va->it.last) { in radeon_vm_bo_set_addr()
502 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
503 r = -ENOMEM; in radeon_vm_bo_set_addr()
506 tmp->it.start = bo_va->it.start; in radeon_vm_bo_set_addr()
507 tmp->it.last = bo_va->it.last; in radeon_vm_bo_set_addr()
508 tmp->vm = vm; in radeon_vm_bo_set_addr()
509 tmp->bo = radeon_bo_ref(bo_va->bo); in radeon_vm_bo_set_addr()
511 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_bo_set_addr()
512 spin_lock(&vm->status_lock); in radeon_vm_bo_set_addr()
513 bo_va->it.start = 0; in radeon_vm_bo_set_addr()
514 bo_va->it.last = 0; in radeon_vm_bo_set_addr()
515 list_del_init(&bo_va->vm_status); in radeon_vm_bo_set_addr()
516 list_add(&tmp->vm_status, &vm->freed); in radeon_vm_bo_set_addr()
517 spin_unlock(&vm->status_lock); in radeon_vm_bo_set_addr()
521 spin_lock(&vm->status_lock); in radeon_vm_bo_set_addr()
522 bo_va->it.start = soffset; in radeon_vm_bo_set_addr()
523 bo_va->it.last = eoffset; in radeon_vm_bo_set_addr()
524 list_add(&bo_va->vm_status, &vm->cleared); in radeon_vm_bo_set_addr()
525 spin_unlock(&vm->status_lock); in radeon_vm_bo_set_addr()
526 interval_tree_insert(&bo_va->it, &vm->va); in radeon_vm_bo_set_addr()
529 bo_va->flags = flags; in radeon_vm_bo_set_addr()
536 if (eoffset > vm->max_pde_used) in radeon_vm_bo_set_addr()
537 vm->max_pde_used = eoffset; in radeon_vm_bo_set_addr()
539 radeon_bo_unreserve(bo_va->bo); in radeon_vm_bo_set_addr()
545 if (vm->page_tables[pt_idx].bo) in radeon_vm_bo_set_addr()
549 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
565 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
566 if (vm->page_tables[pt_idx].bo) { in radeon_vm_bo_set_addr()
568 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
570 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
574 vm->page_tables[pt_idx].addr = 0; in radeon_vm_bo_set_addr()
575 vm->page_tables[pt_idx].bo = pt; in radeon_vm_bo_set_addr()
578 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
582 radeon_bo_unreserve(bo_va->bo); in radeon_vm_bo_set_addr()
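
The visible fragments of radeon_vm_bo_set_addr() sketch its flow: compute the inclusive end offset, reject ranges whose last page frame is at or beyond max_pfn, probe the interval tree for conflicts, move any previous mapping to the freed list, then insert the new interval and mark it cleared. A reduced sketch of just the range validation; how last_pfn is derived is not in the matched lines, so the page shift here is an assumption:

#include <stdint.h>
#include <errno.h>

#define GPU_PAGE_SHIFT 12       /* assumed */

/* Validate a requested VA range against the VM limit, as the fragments suggest. */
static int check_va_range(uint64_t soffset, uint64_t size, uint64_t max_pfn)
{
        uint64_t eoffset = soffset + size - 1;          /* inclusive last byte */
        uint64_t last_pfn = eoffset >> GPU_PAGE_SHIFT;  /* assumed: pfn of the last page */

        if (eoffset < soffset)                          /* wrap-around / empty range */
                return -EINVAL;
        if (last_pfn >= max_pfn)                        /* the "va above limit" case */
                return -EINVAL;
        return 0;
}
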
587 * radeon_vm_map_gart - get the physical address of a gart page
601 result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; in radeon_vm_map_gart()
608 * radeon_vm_page_flags - translate page flags to what the hw uses
629 * radeon_vm_update_page_directory - make sure that page directory is valid
638 * Global and local mutex must be locked!
643 struct radeon_bo *pd = vm->page_directory; in radeon_vm_update_page_directory()
655 ndw += vm->max_pde_used * 6; in radeon_vm_update_page_directory()
659 return -ENOMEM; in radeon_vm_update_page_directory()
667 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { in radeon_vm_update_page_directory()
668 struct radeon_bo *bo = vm->page_tables[pt_idx].bo; in radeon_vm_update_page_directory()
675 if (vm->page_tables[pt_idx].addr == pt) in radeon_vm_update_page_directory()
677 vm->page_tables[pt_idx].addr = pt; in radeon_vm_update_page_directory()
704 radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true); in radeon_vm_update_page_directory()
711 ib.fence->is_vm_update = true; in radeon_vm_update_page_directory()
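
radeon_vm_update_page_directory() appears to budget roughly six command words per used PDE and to rewrite only the directory entries whose page-table address actually changed, caching the last written address in page_tables[i].addr. A small sketch of that skip-unchanged check; the struct is a hypothetical reduction:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct pde_cache {              /* hypothetical stand-in for vm->page_tables[] */
        uint64_t addr;          /* last address written into the directory */
        void *bo;               /* page table BO, NULL if not allocated */
};

/* Return true if directory entry 'idx' needs to be (re)written. */
static bool pde_needs_update(struct pde_cache *tables, unsigned idx, uint64_t pt_addr)
{
        if (!tables[idx].bo)
                return false;                   /* no page table, nothing to point at */
        if (tables[idx].addr == pt_addr)
                return false;                   /* directory already points there */
        tables[idx].addr = pt_addr;             /* remember what we are about to write */
        return true;
}
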
720 * radeon_vm_frag_ptes - add fragment information to PTEs
729 * Global and local mutex must be locked!
738 * field in the PTE. When this field is set to a non-zero value, page in radeon_vm_frag_ptes()
756 uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || in radeon_vm_frag_ptes()
757 (rdev->family == CHIP_ARUBA)) ? in radeon_vm_frag_ptes()
759 uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || in radeon_vm_frag_ptes()
760 (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80; in radeon_vm_frag_ptes()
763 uint64_t frag_end = pe_end & ~(frag_align - 1); in radeon_vm_frag_ptes()
771 count = (pe_end - pe_start) / 8; in radeon_vm_frag_ptes()
779 count = (frag_start - pe_start) / 8; in radeon_vm_frag_ptes()
786 count = (frag_end - frag_start) / 8; in radeon_vm_frag_ptes()
793 count = (pe_end - frag_end) / 8; in radeon_vm_frag_ptes()
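
radeon_vm_frag_ptes() splits a PTE write into an unaligned head, a middle aligned to the fragment size (0x200 bytes on CAYMAN/ARUBA, 0x80 otherwise) written with the fragment flags, and an unaligned tail; each PTE is 8 bytes, hence the /8 in the counts. A standalone sketch of the split; the round-up of frag_start and the small-range fallback condition are assumptions, and the actual flag values are not shown:

#include <stdint.h>
#include <stdio.h>

/* Split [pe_start, pe_end) into head / aligned-fragment middle / tail. */
static void split_frags(uint64_t pe_start, uint64_t pe_end, uint64_t frag_align)
{
        uint64_t frag_start = (pe_start + frag_align - 1) & ~(frag_align - 1); /* assumed round-up */
        uint64_t frag_end = pe_end & ~(frag_align - 1);

        if (frag_start >= frag_end) {
                /* range too small for fragments: one plain write */
                printf("plain: %llu ptes\n", (unsigned long long)((pe_end - pe_start) / 8));
                return;
        }
        printf("head:  %llu ptes\n", (unsigned long long)((frag_start - pe_start) / 8));
        printf("frag:  %llu ptes (fragment flags set)\n",
               (unsigned long long)((frag_end - frag_start) / 8));
        printf("tail:  %llu ptes\n", (unsigned long long)((pe_end - frag_end) / 8));
}
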
800 * radeon_vm_update_ptes - make sure that page tables are valid
810 * Update the page tables in the range @start - @end (cayman+).
812 * Global and local mutex must be locked!
820 uint64_t mask = RADEON_VM_PTE_COUNT - 1; in radeon_vm_update_ptes()
828 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; in radeon_vm_update_ptes()
833 radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); in radeon_vm_update_ptes()
834 r = dma_resv_reserve_fences(pt->tbo.base.resv, 1); in radeon_vm_update_ptes()
839 nptes = end - addr; in radeon_vm_update_ptes()
841 nptes = RADEON_VM_PTE_COUNT - (addr & mask); in radeon_vm_update_ptes()
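
radeon_vm_update_ptes() walks the range one page table at a time: the table index is addr >> radeon_vm_block_size and each write is capped so it never crosses a table boundary (mask = RADEON_VM_PTE_COUNT - 1). A minimal walker under those assumptions; BLOCK_SIZE and PTE_COUNT below are illustrative values, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 9                    /* assumed radeon_vm_block_size */
#define PTE_COUNT  (1u << BLOCK_SIZE)   /* assumed RADEON_VM_PTE_COUNT */

/* Walk [addr, end) in page units, one page table at a time. */
static void update_ptes(uint64_t addr, uint64_t end)
{
        const uint64_t mask = PTE_COUNT - 1;

        while (addr < end) {
                uint64_t pt_idx = addr >> BLOCK_SIZE;           /* which page table */
                uint64_t nptes;

                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;                     /* last, partial table */
                else
                        nptes = PTE_COUNT - (addr & mask);      /* up to the table boundary */

                printf("pt %llu: %llu ptes\n",
                       (unsigned long long)pt_idx, (unsigned long long)nptes);
                addr += nptes;
        }
}
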
875 * radeon_vm_fence_pts - fence page tables after an update
882 * Fence the page tables in the range @start - @end (cayman+).
884 * Global and local mutex must be locked!
893 end = (end - 1) >> radeon_vm_block_size; in radeon_vm_fence_pts()
896 radeon_bo_fence(vm->page_tables[i].bo, fence, true); in radeon_vm_fence_pts()
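
radeon_vm_fence_pts() converts the PTE range into page-table indices and fences every table the range touched; a two-line sketch of that index math:

#include <stdint.h>

/* First and last page-table index covered by the half-open PTE range [start, end). */
static inline void pt_index_range(uint64_t start, uint64_t end, unsigned block_size,
                                  uint64_t *first, uint64_t *last)
{
        *first = start >> block_size;
        *last = (end - 1) >> block_size;        /* end is exclusive, hence the -1 */
}
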
900 * radeon_vm_bo_update - map a bo into the vm page table
907 * Returns 0 for success, -EINVAL for failure.
909 * Object have to be reserved and mutex must be locked!
915 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_update()
922 if (!bo_va->it.start) { in radeon_vm_bo_update()
923 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n", in radeon_vm_bo_update()
924 bo_va->bo, vm); in radeon_vm_bo_update()
925 return -EINVAL; in radeon_vm_bo_update()
928 spin_lock(&vm->status_lock); in radeon_vm_bo_update()
930 if (list_empty(&bo_va->vm_status)) { in radeon_vm_bo_update()
931 spin_unlock(&vm->status_lock); in radeon_vm_bo_update()
934 list_del_init(&bo_va->vm_status); in radeon_vm_bo_update()
936 list_del(&bo_va->vm_status); in radeon_vm_bo_update()
937 list_add(&bo_va->vm_status, &vm->cleared); in radeon_vm_bo_update()
939 spin_unlock(&vm->status_lock); in radeon_vm_bo_update()
941 bo_va->flags &= ~RADEON_VM_PAGE_VALID; in radeon_vm_bo_update()
942 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; in radeon_vm_bo_update()
943 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; in radeon_vm_bo_update()
944 if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm)) in radeon_vm_bo_update()
945 bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; in radeon_vm_bo_update()
948 addr = (u64)mem->start << PAGE_SHIFT; in radeon_vm_bo_update()
949 if (mem->mem_type != TTM_PL_SYSTEM) in radeon_vm_bo_update()
950 bo_va->flags |= RADEON_VM_PAGE_VALID; in radeon_vm_bo_update()
952 if (mem->mem_type == TTM_PL_TT) { in radeon_vm_bo_update()
953 bo_va->flags |= RADEON_VM_PAGE_SYSTEM; in radeon_vm_bo_update()
954 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC))) in radeon_vm_bo_update()
955 bo_va->flags |= RADEON_VM_PAGE_SNOOPED; in radeon_vm_bo_update()
958 addr += rdev->vm_manager.vram_base_offset; in radeon_vm_bo_update()
966 nptes = bo_va->it.last - bo_va->it.start + 1; in radeon_vm_bo_update()
975 flags = radeon_vm_page_flags(bo_va->flags); in radeon_vm_bo_update()
997 return -ENOMEM; in radeon_vm_bo_update()
1004 if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) { in radeon_vm_bo_update()
1008 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); in radeon_vm_bo_update()
1011 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, in radeon_vm_bo_update()
1012 bo_va->it.last + 1, addr, in radeon_vm_bo_update()
1013 radeon_vm_page_flags(bo_va->flags)); in radeon_vm_bo_update()
1027 ib.fence->is_vm_update = true; in radeon_vm_bo_update()
1028 radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); in radeon_vm_bo_update()
1029 radeon_fence_unref(&bo_va->last_pt_update); in radeon_vm_bo_update()
1030 bo_va->last_pt_update = radeon_fence_ref(ib.fence); in radeon_vm_bo_update()
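
From the matched lines of radeon_vm_bo_update(), the mapping flags follow the buffer's placement: anything not in system memory is VALID, GTT placements get SYSTEM (plus SNOOPED unless the BO asked for WC or UC), and VRAM addresses are rebased by vram_base_offset. A condensed sketch; the placement and flag constants below are local stand-ins, not the driver's values:

#include <stdint.h>
#include <stdbool.h>

enum placement { PL_SYSTEM, PL_TT, PL_VRAM };   /* stand-ins for TTM_PL_* */
#define F_VALID   (1u << 0)                     /* stand-ins for RADEON_VM_PAGE_* */
#define F_SYSTEM  (1u << 1)
#define F_SNOOPED (1u << 2)

static uint32_t mapping_flags(enum placement pl, bool wc_or_uc)
{
        uint32_t flags = 0;

        if (pl != PL_SYSTEM)
                flags |= F_VALID;               /* backed by real memory */
        if (pl == PL_TT) {
                flags |= F_SYSTEM;              /* pages live in system RAM */
                if (!wc_or_uc)
                        flags |= F_SNOOPED;     /* cache-coherent unless WC/UC */
        }
        return flags;
}
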
1037 * radeon_vm_clear_freed - clear freed BOs in the PT
1045 * PTs have to be reserved and mutex must be locked!
1053 spin_lock(&vm->status_lock); in radeon_vm_clear_freed()
1054 while (!list_empty(&vm->freed)) { in radeon_vm_clear_freed()
1055 bo_va = list_first_entry(&vm->freed, in radeon_vm_clear_freed()
1057 spin_unlock(&vm->status_lock); in radeon_vm_clear_freed()
1060 radeon_bo_unref(&bo_va->bo); in radeon_vm_clear_freed()
1061 radeon_fence_unref(&bo_va->last_pt_update); in radeon_vm_clear_freed()
1062 spin_lock(&vm->status_lock); in radeon_vm_clear_freed()
1063 list_del(&bo_va->vm_status); in radeon_vm_clear_freed()
1069 spin_unlock(&vm->status_lock); in radeon_vm_clear_freed()
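
radeon_vm_clear_freed() drains the freed list with the usual drop-the-lock-while-working pattern: take status_lock, pick the first entry, release the lock for the page-table update and unref, then retake it before looking at the list again. A skeleton of that loop with a pthread mutex standing in for the spinlock; it is a single-consumer illustration only and unlinks up front, whereas the driver deletes the entry after the update:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };     /* hypothetical stand-in for the vm_status list */

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *freed;

/* Drain 'freed', doing the per-entry work without holding the lock. */
static void clear_freed(void (*update)(struct node *))
{
        pthread_mutex_lock(&status_lock);
        while (freed) {
                struct node *n = freed;

                freed = n->next;                        /* simplified: unlink up front */
                pthread_mutex_unlock(&status_lock);     /* the update may sleep */
                update(n);
                pthread_mutex_lock(&status_lock);
        }
        pthread_mutex_unlock(&status_lock);
}
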
1075 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
1083 * PTs have to be reserved and mutex must be locked!
1091 spin_lock(&vm->status_lock); in radeon_vm_clear_invalids()
1092 while (!list_empty(&vm->invalidated)) { in radeon_vm_clear_invalids()
1093 bo_va = list_first_entry(&vm->invalidated, in radeon_vm_clear_invalids()
1095 spin_unlock(&vm->status_lock); in radeon_vm_clear_invalids()
1101 spin_lock(&vm->status_lock); in radeon_vm_clear_invalids()
1103 spin_unlock(&vm->status_lock); in radeon_vm_clear_invalids()
1109 * radeon_vm_bo_rmv - remove a bo from a specific vm
1114 * Remove @bo_va->bo from the requested vm (cayman+).
1121 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_rmv()
1123 list_del(&bo_va->bo_list); in radeon_vm_bo_rmv()
1125 mutex_lock(&vm->mutex); in radeon_vm_bo_rmv()
1126 if (bo_va->it.start || bo_va->it.last) in radeon_vm_bo_rmv()
1127 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_bo_rmv()
1129 spin_lock(&vm->status_lock); in radeon_vm_bo_rmv()
1130 list_del(&bo_va->vm_status); in radeon_vm_bo_rmv()
1131 if (bo_va->it.start || bo_va->it.last) { in radeon_vm_bo_rmv()
1132 bo_va->bo = radeon_bo_ref(bo_va->bo); in radeon_vm_bo_rmv()
1133 list_add(&bo_va->vm_status, &vm->freed); in radeon_vm_bo_rmv()
1135 radeon_fence_unref(&bo_va->last_pt_update); in radeon_vm_bo_rmv()
1138 spin_unlock(&vm->status_lock); in radeon_vm_bo_rmv()
1140 mutex_unlock(&vm->mutex); in radeon_vm_bo_rmv()
1144 * radeon_vm_bo_invalidate - mark the bo as invalid
1156 list_for_each_entry(bo_va, &bo->va, bo_list) { in radeon_vm_bo_invalidate()
1157 spin_lock(&bo_va->vm->status_lock); in radeon_vm_bo_invalidate()
1158 if (list_empty(&bo_va->vm_status) && in radeon_vm_bo_invalidate()
1159 (bo_va->it.start || bo_va->it.last)) in radeon_vm_bo_invalidate()
1160 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); in radeon_vm_bo_invalidate()
1161 spin_unlock(&bo_va->vm->status_lock); in radeon_vm_bo_invalidate()
1166 * radeon_vm_init - initialize a vm instance
1180 vm->ib_bo_va = NULL; in radeon_vm_init()
1182 vm->ids[i].id = 0; in radeon_vm_init()
1183 vm->ids[i].flushed_updates = NULL; in radeon_vm_init()
1184 vm->ids[i].last_id_use = NULL; in radeon_vm_init()
1186 mutex_init(&vm->mutex); in radeon_vm_init()
1187 vm->va = RB_ROOT_CACHED; in radeon_vm_init()
1188 spin_lock_init(&vm->status_lock); in radeon_vm_init()
1189 INIT_LIST_HEAD(&vm->invalidated); in radeon_vm_init()
1190 INIT_LIST_HEAD(&vm->freed); in radeon_vm_init()
1191 INIT_LIST_HEAD(&vm->cleared); in radeon_vm_init()
1198 vm->page_tables = kzalloc(pts_size, GFP_KERNEL); in radeon_vm_init()
1199 if (vm->page_tables == NULL) { in radeon_vm_init()
1201 return -ENOMEM; in radeon_vm_init()
1206 NULL, &vm->page_directory); in radeon_vm_init()
1208 kfree(vm->page_tables); in radeon_vm_init()
1209 vm->page_tables = NULL; in radeon_vm_init()
1212 r = radeon_vm_clear_bo(rdev, vm->page_directory); in radeon_vm_init()
1214 radeon_bo_unref(&vm->page_directory); in radeon_vm_init()
1215 vm->page_directory = NULL; in radeon_vm_init()
1216 kfree(vm->page_tables); in radeon_vm_init()
1217 vm->page_tables = NULL; in radeon_vm_init()
1225 * radeon_vm_fini - tear down a vm instance
1238 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) in radeon_vm_fini()
1239 dev_err(rdev->dev, "still active bo inside vm\n"); in radeon_vm_fini()
1242 &vm->va.rb_root, it.rb) { in radeon_vm_fini()
1243 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_fini()
1244 r = radeon_bo_reserve(bo_va->bo, false); in radeon_vm_fini()
1246 list_del_init(&bo_va->bo_list); in radeon_vm_fini()
1247 radeon_bo_unreserve(bo_va->bo); in radeon_vm_fini()
1248 radeon_fence_unref(&bo_va->last_pt_update); in radeon_vm_fini()
1252 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { in radeon_vm_fini()
1253 radeon_bo_unref(&bo_va->bo); in radeon_vm_fini()
1254 radeon_fence_unref(&bo_va->last_pt_update); in radeon_vm_fini()
1259 radeon_bo_unref(&vm->page_tables[i].bo); in radeon_vm_fini()
1260 kfree(vm->page_tables); in radeon_vm_fini()
1262 radeon_bo_unref(&vm->page_directory); in radeon_vm_fini()
1265 radeon_fence_unref(&vm->ids[i].flushed_updates); in radeon_vm_fini()
1266 radeon_fence_unref(&vm->ids[i].last_id_use); in radeon_vm_fini()
1269 mutex_destroy(&vm->mutex); in radeon_vm_fini()