/linux-6.12.1/drivers/gpu/drm/ttm/

D | ttm_tt.c
     72  if (bo->ttm)  in ttm_tt_create()
     99  bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);  in ttm_tt_create()
    100  if (unlikely(bo->ttm == NULL))  in ttm_tt_create()
    103  WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&  in ttm_tt_create()
    104  !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));  in ttm_tt_create()
    113  static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)  in ttm_tt_alloc_page_directory() argument
    115  ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);  in ttm_tt_alloc_page_directory()
    116  if (!ttm->pages)  in ttm_tt_alloc_page_directory()
    122  static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)  in ttm_dma_tt_alloc_page_directory() argument
    124  ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +  in ttm_dma_tt_alloc_page_directory()
    [all …]
D | ttm_agp_backend.c
     45  struct ttm_tt ttm;  member
     50  int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)  in ttm_agp_bind() argument
     52  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_bind()
     55  int ret, cached = ttm->caching == ttm_cached;  in ttm_agp_bind()
     61  mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);  in ttm_agp_bind()
     66  for (i = 0; i < ttm->num_pages; i++) {  in ttm_agp_bind()
     67  struct page *page = ttm->pages[i];  in ttm_agp_bind()
     87  void ttm_agp_unbind(struct ttm_tt *ttm)  in ttm_agp_unbind() argument
     89  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_unbind()
    102  bool ttm_agp_is_bound(struct ttm_tt *ttm)  in ttm_agp_is_bound() argument
    [all …]
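The ttm_agp_backend.c hits above (and the radeon, nouveau, i915, amdgpu and xe backends further down) all rely on the same idiom: the driver embeds struct ttm_tt inside its own backend type and recovers the wrapper with container_of() in its bind/unbind callbacks while walking the populated ttm->pages[] array. A minimal sketch of that idiom follows; struct my_backend_tt and my_backend_bind() are hypothetical names for illustration, not kernel APIs.

    #include <linux/container_of.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <drm/ttm/ttm_resource.h>
    #include <drm/ttm/ttm_tt.h>

    /* Hypothetical driver backend wrapping the generic ttm_tt. */
    struct my_backend_tt {
            struct ttm_tt ttm;      /* embedded base object, as in ttm_agp_backend.c line 45 */
            bool bound;             /* driver-private state, illustration only */
    };

    static int my_backend_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
    {
            /* Recover the wrapper from the base pointer TTM hands us. */
            struct my_backend_tt *be = container_of(ttm, struct my_backend_tt, ttm);
            unsigned long i;

            /* The populated page array runs from 0 to ttm->num_pages - 1. */
            for (i = 0; i < ttm->num_pages; i++) {
                    struct page *page = ttm->pages[i];

                    if (!page)
                            return -EINVAL;
                    /* ... program one GART/aperture entry for this page ... */
            }

            be->bound = true;
            return 0;
    }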
D | ttm_bo_util.c
    149  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_move_memcpy() local
    164  if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||  in ttm_bo_move_memcpy()
    166  ret = ttm_tt_populate(bdev, ttm, ctx);  in ttm_bo_move_memcpy()
    173  dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);  in ttm_bo_move_memcpy()
    179  src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);  in ttm_bo_move_memcpy()
    185  clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));  in ttm_bo_move_memcpy()
    186  if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))  in ttm_bo_move_memcpy()
    298  caching = bo->ttm->caching;  in ttm_io_prot()
    299  if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)  in ttm_io_prot()
    345  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_kmap_ttm() local
    [all …]
/linux-6.12.1/drivers/gpu/drm/radeon/

D | radeon_ttm.c
     56  static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
     58  static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
    198  r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);  in radeon_bo_move()
    209  bo->ttm == NULL)) {  in radeon_bo_move()
    221  radeon_ttm_tt_unbind(bo->bdev, bo->ttm);  in radeon_bo_move()
    314  struct ttm_tt ttm;  member
    324  static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)  in radeon_ttm_tt_pin_userptr() argument
    327  struct radeon_ttm_tt *gtt = (void *)ttm;  in radeon_ttm_tt_pin_userptr()
    341  unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;  in radeon_ttm_tt_pin_userptr()
    349  unsigned num_pages = ttm->num_pages - pinned;  in radeon_ttm_tt_pin_userptr()
    [all …]
/linux-6.12.1/include/drm/ttm/

D | ttm_tt.h
    157  int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
    170  void ttm_tt_fini(struct ttm_tt *ttm);
    180  void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
    189  int ttm_tt_swapin(struct ttm_tt *ttm);
    190  int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
    202  int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
    213  void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
    223  static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)  in ttm_tt_mark_for_clear() argument
    225  ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;  in ttm_tt_mark_for_clear()
    251  int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
    [all …]
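The declarations above cover the full ttm_tt lifecycle: ttm_tt_init()/ttm_tt_fini() at creation and destruction, ttm_tt_populate()/ttm_tt_unpopulate() for the backing pages, and ttm_tt_swapin()/ttm_tt_swapout() under memory pressure. Below is a minimal sketch of how a driver's create/destroy hooks typically pair ttm_tt_init() with ttm_tt_fini(), essentially the pattern the qxl_ttm.c entry later in this listing shows; the my_ttm_tt_create()/my_ttm_tt_destroy() names are hypothetical.

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <drm/ttm/ttm_bo.h>
    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_tt.h>

    /* .ttm_tt_create hook: allocate and initialise a plain, cached ttm_tt. */
    static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
    {
            struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

            if (!tt)
                    return NULL;

            /* Cached CPU mapping, no extra pages appended to the BO. */
            if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
                    kfree(tt);
                    return NULL;
            }
            return tt;
    }

    /* .ttm_tt_destroy hook: tear down the page directory, then free. */
    static void my_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
    {
            ttm_tt_fini(tt);
            kfree(tt);
    }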
/linux-6.12.1/drivers/gpu/drm/i915/gem/

D | i915_gem_ttm.c
     50  struct ttm_tt ttm;  member
    182  struct ttm_tt *ttm,  in i915_ttm_tt_shmem_populate() argument
    187  struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);  in i915_ttm_tt_shmem_populate()
    189  const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;  in i915_ttm_tt_shmem_populate()
    227  ttm->pages[i++] = page;  in i915_ttm_tt_shmem_populate()
    229  if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)  in i915_ttm_tt_shmem_populate()
    230  ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;  in i915_ttm_tt_shmem_populate()
    240  static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)  in i915_ttm_tt_shmem_unpopulate() argument
    242  struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);  in i915_ttm_tt_shmem_unpopulate()
    243  bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;  in i915_ttm_tt_shmem_unpopulate()
    [all …]
D | i915_gem_ttm_pm.c
     24  if (obj->ttm.backup) {  in i915_ttm_backup_free()
     25  i915_gem_object_put(obj->ttm.backup);  in i915_ttm_backup_free()
     26  obj->ttm.backup = NULL;  in i915_ttm_backup_free()
     56  if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)  in i915_ttm_backup()
     93  err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);  in i915_ttm_backup()
    106  obj->ttm.backup = backup;  in i915_ttm_backup()
    173  struct drm_i915_gem_object *backup = obj->ttm.backup;  in i915_ttm_restore()
    192  err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);  in i915_ttm_restore()
    199  obj->ttm.backup = NULL;  in i915_ttm_restore()
D | i915_gem_ttm_move.c
     53  struct ttm_tt *ttm)  in i915_ttm_cache_level() argument
     57  ttm->caching == ttm_cached) ? I915_CACHE_LLC :  in i915_ttm_cache_level()
     86  if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {  in i915_ttm_adjust_domains_after_move()
    125  bo->ttm);  in i915_ttm_adjust_gem_after_move()
    198  struct ttm_tt *src_ttm = bo->ttm;  in i915_ttm_accel_move()
    330  ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :  in i915_ttm_memcpy_init()
    332  &obj->ttm.cached_io_rsgt->table,  in i915_ttm_memcpy_init()
    579  struct ttm_tt *ttm = bo->ttm;  in i915_ttm_move() local
    626  if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {  in i915_ttm_move()
    627  ret = ttm_tt_populate(bo->bdev, ttm, ctx);  in i915_ttm_move()
    [all …]
/linux-6.12.1/drivers/gpu/drm/nouveau/

D | nouveau_sgdma.c
     15  struct ttm_tt ttm;  member
     20  nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_destroy() argument
     22  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_destroy()
     24  if (ttm) {  in nouveau_sgdma_destroy()
     25  ttm_tt_fini(&nvbe->ttm);  in nouveau_sgdma_destroy()
     31  nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)  in nouveau_sgdma_bind() argument
     33  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_bind()
     41  ret = nouveau_mem_host(reg, &nvbe->ttm);  in nouveau_sgdma_bind()
     58  nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_unbind() argument
     60  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_unbind()
    [all …]
D | nouveau_ttm.c
    170  drm->ttm.type_host[!!kind] = typei;  in nouveau_ttm_init_host()
    176  drm->ttm.type_ncoh[!!kind] = typei;  in nouveau_ttm_init_host()
    191  ttm_resource_manager_init(man, &drm->ttm.bdev,  in nouveau_ttm_init_vram()
    193  ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);  in nouveau_ttm_init_vram()
    197  return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,  in nouveau_ttm_init_vram()
    205  struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
    209  ttm_resource_manager_evict_all(&drm->ttm.bdev, man);  in nouveau_ttm_fini_vram()
    211  ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);  in nouveau_ttm_fini_vram()
    214  ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
    229  return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,  in nouveau_ttm_init_gtt()
    [all …]
D | nouveau_bo.c
     47  static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
     49  static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
    229  nvbo->bo.bdev = &drm->ttm.bdev;  in nouveau_bo_alloc()
    615  struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;  in nouveau_bo_sync_for_device()
    651  struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;  in nouveau_bo_sync_for_cpu()
    688  mutex_lock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_add_io_reserve_lru()
    689  list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);  in nouveau_bo_add_io_reserve_lru()
    690  mutex_unlock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_add_io_reserve_lru()
    698  mutex_lock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_del_io_reserve_lru()
    700  mutex_unlock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_del_io_reserve_lru()
    [all …]
D | nouveau_ttm.h
      8  return container_of(bd, struct nouveau_drm, ttm.bdev);  in nouveau_bdev()
     24  int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
     25  void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
     26  void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/linux-6.12.1/drivers/gpu/drm/xe/

D | xe_bo.c
     89  return resource_is_vram(bo->ttm.resource) ||  in xe_bo_is_vram()
     90  resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);  in xe_bo_is_vram()
     95  return bo->ttm.resource->mem_type == XE_PL_STOLEN;  in xe_bo_is_stolen()
    148  mgr = ttm_manager_type(&xe->ttm, res->mem_type);  in res_to_mem_region()
    174  vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;  in add_vram()
    282  struct ttm_tt ttm;  member
    290  struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);  in xe_tt_map_sg()
    321  struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);  in xe_tt_unmap_sg()
    333  struct ttm_tt *tt = bo->ttm.ttm;  in xe_bo_sg()
    334  struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);  in xe_bo_sg()
    [all …]
D | xe_bo.h
    111  return container_of(bo, struct xe_bo, ttm);  in ttm_to_xe_bo()
    116  return container_of(obj, struct xe_bo, ttm.base);  in gem_to_xe_bo()
    119  #define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)
    124  drm_gem_object_get(&bo->ttm.base);  in xe_bo_get()
    134  ttm_bo_set_bulk_move(&bo->ttm, NULL);  in __xe_bo_unset_bulk_move()
    140  dma_resv_assert_held((bo)->ttm.base.resv);  in xe_bo_assert_held()
    150  XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));  in xe_bo_unlock_vm_held()
    154  dma_resv_unlock(bo->ttm.base.resv);  in xe_bo_unlock_vm_held()
    166  return bo->ttm.pin_count;  in xe_bo_is_pinned()
    236  return PAGE_ALIGN(bo->ttm.base.size);  in xe_bo_ccs_pages_start()
    [all …]
D | xe_dma_buf.c
     61  if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {  in xe_dma_buf_pin()
    110  switch (bo->ttm.resource->mem_type) {  in xe_dma_buf_map()
    113  bo->ttm.ttm->pages,  in xe_dma_buf_map()
    114  bo->ttm.ttm->num_pages);  in xe_dma_buf_map()
    126  bo->ttm.resource, 0,  in xe_dma_buf_map()
    127  bo->ttm.base.size, attach->dev,  in xe_dma_buf_map()
    227  return &bo->ttm.base;  in xe_dma_buf_init_obj()
    299  attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);  in xe_gem_prime_import()
D | xe_drm_client.c
    155  struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);  in xe_drm_client_remove_bo()
    158  xe_assert(xe, !kref_read(&bo->ttm.base.refcount));  in xe_drm_client_remove_bo()
    171  u32 mem_type = bo->ttm.resource->mem_type;  in bo_meminfo()
    175  if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))  in bo_meminfo()
    183  if (!dma_resv_test_signaled(bo->ttm.base.resv,  in bo_meminfo()
    195  struct ttm_device *bdev = &xef->xe->ttm;  in show_meminfo()
    211  if (dma_resv_trylock(bo->ttm.base.resv)) {  in show_meminfo()
    231  if (!kref_get_unless_zero(&bo->ttm.base.refcount))  in show_meminfo()
    234  if (dma_resv_trylock(bo->ttm.base.resv)) {  in show_meminfo()
/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_ttm.c
     69  struct ttm_tt *ttm,
     72  struct ttm_tt *ttm);
    244  flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);  in amdgpu_ttm_map_buffer()
    253  dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];  in amdgpu_ttm_map_buffer()
    497  r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);  in amdgpu_bo_move()
    506  bo->ttm == NULL)) {  in amdgpu_bo_move()
    525  amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);  in amdgpu_bo_move()
    673  struct ttm_tt ttm;  member
    683  #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
    696  struct ttm_tt *ttm = bo->tbo.ttm;  in amdgpu_ttm_tt_get_user_pages() local
    [all …]
D | amdgpu_ttm.h
    189  void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
    191  bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
    200  static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,  in amdgpu_ttm_tt_discard_user_pages() argument
    204  static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,  in amdgpu_ttm_tt_get_user_pages_done() argument
    211  void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
    216  bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
    217  struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
    218  bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
    220  bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
    222  bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
    [all …]
/linux-6.12.1/drivers/gpu/drm/vmwgfx/

D | vmwgfx_ttm_buffer.c
    267  container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_bo_sg_table()
    274  struct ttm_tt *ttm, struct ttm_resource *bo_mem)  in vmw_ttm_bind() argument
    277  container_of(ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_ttm_bind()
    296  ttm->num_pages, vmw_be->gmr_id);  in vmw_ttm_bind()
    301  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
    307  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
    321  struct ttm_tt *ttm)  in vmw_ttm_unbind() argument
    324  container_of(ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_ttm_unbind()
    348  static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in vmw_ttm_destroy() argument
    351  container_of(ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_ttm_destroy()
    [all …]
D | vmwgfx_blit.c
    559  bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;  in vmw_bo_cpu_blit()
    560  bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;  in vmw_bo_cpu_blit()
    571  if (!ttm_tt_is_populated(dst->ttm)) {  in vmw_bo_cpu_blit()
    572  ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);  in vmw_bo_cpu_blit()
    577  if (!ttm_tt_is_populated(src->ttm)) {  in vmw_bo_cpu_blit()
    578  ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);  in vmw_bo_cpu_blit()
    588  if (!src->ttm->pages && src->ttm->sg) {  in vmw_bo_cpu_blit()
    589  src_pages = kvmalloc_array(src->ttm->num_pages,  in vmw_bo_cpu_blit()
    593  ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,  in vmw_bo_cpu_blit()
    594  src->ttm->num_pages);  in vmw_bo_cpu_blit()
    [all …]
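vmw_bo_cpu_blit() above follows the same rule as ttm_bo_move_memcpy() in ttm_bo_util.c: before the CPU touches a BO's pages, the ttm_tt is populated on demand. A condensed sketch of that check, assuming the caller already holds the BO reservation; my_cpu_access() is a hypothetical helper, and it calls the generic ttm_tt_populate() declared in ttm_tt.h rather than the driver hook that vmwgfx invokes directly.

    #include <linux/errno.h>
    #include <drm/ttm/ttm_bo.h>
    #include <drm/ttm/ttm_tt.h>

    /* Ensure bo->ttm has backing pages before CPU (kmap/memcpy) access. */
    static int my_cpu_access(struct ttm_buffer_object *bo,
                             struct ttm_operation_ctx *ctx)
    {
            struct ttm_tt *tt = bo->ttm;
            int ret;

            if (!tt)
                    return -EINVAL;     /* no system pages, e.g. io-only BO */

            if (!ttm_tt_is_populated(tt)) {
                    ret = ttm_tt_populate(bo->bdev, tt, ctx);
                    if (ret)
                            return ret;
            }

            /* tt->pages[] is now valid for CPU access. */
            return 0;
    }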
/linux-6.12.1/drivers/gpu/drm/qxl/

D | qxl_ttm.c
    100  static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in qxl_ttm_backend_destroy() argument
    102  ttm_tt_fini(ttm);  in qxl_ttm_backend_destroy()
    103  kfree(ttm);  in qxl_ttm_backend_destroy()
    109  struct ttm_tt *ttm;  in qxl_ttm_tt_create() local
    111  ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);  in qxl_ttm_tt_create()
    112  if (ttm == NULL)  in qxl_ttm_tt_create()
    114  if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) {  in qxl_ttm_tt_create()
    115  kfree(ttm);  in qxl_ttm_tt_create()
    118  return ttm;  in qxl_ttm_tt_create()
    161  if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {  in qxl_bo_move()
/linux-6.12.1/drivers/gpu/drm/xe/display/

D | intel_fb_bo.c
     29  struct xe_device *xe = to_xe_device(bo->ttm.base.dev);  in intel_fb_bo_framebuffer_init()
     42  ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);  in intel_fb_bo_framebuffer_init()
     53  if (XE_IOCTL_DBG(xe, !list_empty(&bo->ttm.base.gpuva.list))) {  in intel_fb_bo_framebuffer_init()
     54  ttm_bo_unreserve(&bo->ttm);  in intel_fb_bo_framebuffer_init()
     60  ttm_bo_unreserve(&bo->ttm);  in intel_fb_bo_framebuffer_init()
     82  bo->ttm.type != ttm_bo_type_sg) {  in intel_fb_bo_lookup_valid_bo()
D | xe_fb_pin.c
     88  u32 dpt_size, size = bo->ttm.base.size;  in __xe_pin_fb_vma_dpt()
    209  u32 x, size = bo->ttm.base.size;  in __xe_pin_fb_vma_ggtt()
    278  if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&  in __xe_pin_fb_vma()
    299  ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);  in __xe_pin_fb_vma()
    308  ttm_bo_pin(&bo->ttm);  in __xe_pin_fb_vma()
    309  ttm_bo_unreserve(&bo->ttm);  in __xe_pin_fb_vma()
    326  ttm_bo_reserve(&bo->ttm, false, false, NULL);  in __xe_pin_fb_vma()
    327  ttm_bo_unpin(&bo->ttm);  in __xe_pin_fb_vma()
    328  ttm_bo_unreserve(&bo->ttm);  in __xe_pin_fb_vma()
    342  ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);  in __xe_unpin_fb_vma()
    [all …]
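The xe_fb_pin.c hits show the reserve/pin/unreserve sequence that keeps a scanout BO resident, mirrored later by ttm_bo_unpin(). A generic sketch of the same sequence with error handling trimmed; my_pin_for_scanout() is a hypothetical wrapper, not an xe or TTM function.

    #include <drm/ttm/ttm_bo.h>

    /* Pin a BO so TTM will not move or evict it while scanout uses it. */
    static int my_pin_for_scanout(struct ttm_buffer_object *bo)
    {
            int ret;

            /* interruptible = false, no_wait = false, no ww ticket */
            ret = ttm_bo_reserve(bo, false, false, NULL);
            if (ret)
                    return ret;

            ttm_bo_pin(bo);
            ttm_bo_unreserve(bo);
            return 0;
    }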
/linux-6.12.1/drivers/gpu/drm/xe/tests/

D | xe_bo.c
     22  struct ttm_tt *ttm;  in ccs_test_migrate() local
     39  fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,  in ccs_test_migrate()
     56  timeout = dma_resv_wait_timeout(bo->ttm.base.resv,  in ccs_test_migrate()
     70  ttm = bo->ttm.ttm;  in ccs_test_migrate()
     71  if (!ttm || !ttm_tt_is_populated(ttm)) {  in ccs_test_migrate()
     77  if (ccs_page >= ttm->num_pages) {  in ccs_test_migrate()
     82  page = ttm->pages[ccs_page];  in ccs_test_migrate()
/linux-6.12.1/drivers/gpu/drm/ttm/tests/

D | ttm_kunit_helpers.c
     55  static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in ttm_tt_simple_destroy() argument
     57  kfree(ttm);  in ttm_tt_simple_destroy()
     67  if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {  in mock_move()
    116  struct ttm_device *ttm,  in ttm_device_kunit_init_with_funcs() argument
    124  err = ttm_device_init(ttm, funcs, drm->dev,  in ttm_device_kunit_init_with_funcs()
    142  struct ttm_device *ttm,  in ttm_device_kunit_init() argument
    146  return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,  in ttm_device_kunit_init()
    161  struct ttm_device *ttm,  in ttm_device_kunit_init_bad_evict() argument
    165  return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,  in ttm_device_kunit_init_bad_evict()