Lines Matching +full:0 +full:xe
41 .fpfn = 0,
42 .lpfn = 0,
44 .flags = 0,
54 .fpfn = 0,
55 .lpfn = 0,
60 .fpfn = 0,
61 .lpfn = 0,
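
The .fpfn/.lpfn/.flags = 0 hits above come from the static struct ttm_place initializers near the top of the file; in TTM, a zero fpfn/lpfn pair means the placement carries no page-frame range restriction. As a rough, self-contained illustration of that convention (simplified stand-in types, not the driver's or TTM's actual structures):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for TTM's struct ttm_place: fpfn/lpfn bound the
	 * allocation to a page-frame range; 0/0 means "anywhere in the domain". */
	struct place {
		uint32_t fpfn;		/* first allowed page frame, inclusive */
		uint32_t lpfn;		/* last allowed page frame; 0 = no limit */
		uint32_t mem_type;
		uint32_t flags;
	};

	/* Hypothetical helper mirroring the pattern of the initializers above. */
	static struct place unrestricted_place(uint32_t mem_type)
	{
		return (struct place){
			.fpfn = 0,	/* no lower bound */
			.lpfn = 0,	/* no upper bound */
			.mem_type = mem_type,
			.flags = 0,
		};
	}

	int main(void)
	{
		struct place p = unrestricted_place(1 /* e.g. a system-memory domain */);

		printf("fpfn=%u lpfn=%u\n", (unsigned)p.fpfn, (unsigned)p.lpfn);
		return 0;
	}
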
77 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
133 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) in mem_type_to_migrate() argument
137 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); in mem_type_to_migrate()
138 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
144 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region() local
147 xe_assert(xe, resource_is_vram(res)); in res_to_mem_region()
148 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
152 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, in try_add_system() argument
156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
165 static void add_vram(struct xe_device *xe, struct xe_bo *bo, in add_vram() argument
172 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
174 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; in add_vram()
175 xe_assert(xe, vram && vram->usable_size); in add_vram()
188 place.fpfn = 0; in add_vram()
198 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, in try_add_vram() argument
202 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
204 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
207 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, in try_add_stolen() argument
211 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
217 TTM_PL_FLAG_CONTIGUOUS : 0, in try_add_stolen()
223 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in __xe_bo_placement_for_flags() argument
226 u32 c = 0; in __xe_bo_placement_for_flags()
228 try_add_vram(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
229 try_add_system(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
230 try_add_stolen(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
240 return 0; in __xe_bo_placement_for_flags()
243 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in xe_bo_placement_for_flags() argument
247 return __xe_bo_placement_for_flags(xe, bo, bo_flags); in xe_bo_placement_for_flags()
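
The try_add_system()/add_vram()/try_add_stolen() hits show the pattern behind __xe_bo_placement_for_flags(): each helper appends a candidate placement for a requested memory type and bumps a shared counter, with xe_assert() guarding the array bound. A minimal userspace sketch of that pattern, using made-up flag values and simplified types rather than the driver's:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_PLACEMENTS 4
	#define FLAG_SYSTEM (1u << 0)
	#define FLAG_VRAM0  (1u << 1)

	struct place { uint32_t mem_type; };

	struct bo {
		uint32_t flags;
		struct place placements[MAX_PLACEMENTS];
		uint32_t num_placements;
	};

	/* Append a candidate placement if the BO asked for this memory type. */
	static void try_add(struct bo *bo, uint32_t want, uint32_t mem_type, uint32_t *c)
	{
		if (!(bo->flags & want))
			return;
		assert(*c < MAX_PLACEMENTS);	/* mirrors the xe_assert() hits above */
		bo->placements[(*c)++].mem_type = mem_type;
	}

	static int placement_for_flags(struct bo *bo)
	{
		uint32_t c = 0;

		try_add(bo, FLAG_VRAM0, 2, &c);		/* VRAM first when requested */
		try_add(bo, FLAG_SYSTEM, 1, &c);	/* then system memory */
		if (!c)
			return -1;			/* nothing usable was requested */
		bo->num_placements = c;
		return 0;
	}

	int main(void)
	{
		struct bo bo = { .flags = FLAG_VRAM0 | FLAG_SYSTEM };

		if (!placement_for_flags(&bo))
			printf("%u candidate placements\n", (unsigned)bo.num_placements);
		return 0;
	}

The counter doubles as the success test: if no helper added a placement, the flags requested nothing usable and the function fails.
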
256 placement->num_placement = 0; in xe_evict_flags()
265 * For xe, sg bos that are evicted to system just triggers a in xe_evict_flags()
297 return 0; in xe_tt_map_sg()
300 num_pages, 0, in xe_tt_map_sg()
316 return 0; in xe_tt_map_sg()
325 DMA_BIDIRECTIONAL, 0); in xe_tt_unmap_sg()
343 struct xe_device *xe = xe_bo_device(bo); in xe_ttm_tt_create() local
353 tt->dev = xe->drm.dev; in xe_ttm_tt_create()
355 extra_pages = 0; in xe_ttm_tt_create()
357 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
366 if (!IS_DGFX(xe)) { in xe_ttm_tt_create()
385 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
395 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
418 return 0; in xe_ttm_tt_populate()
446 struct xe_device *xe = ttm_to_xe_device(bdev); in xe_ttm_io_mem_reserve() local
451 return 0; in xe_ttm_io_mem_reserve()
474 return 0; in xe_ttm_io_mem_reserve()
476 return xe_ttm_stolen_io_mem_reserve(xe, mem); in xe_ttm_io_mem_reserve()
482 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, in xe_bo_trigger_rebind() argument
490 int ret = 0; in xe_bo_trigger_rebind()
525 if (timeout < 0) in xe_bo_trigger_rebind()
559 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf() local
562 xe_assert(xe, attach); in xe_bo_move_dmabuf()
563 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
583 return 0; in xe_bo_move_dmabuf()
601 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
608 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify() local
622 ret = xe_bo_trigger_rebind(xe, bo, ctx); in xe_bo_move_notify()
636 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
639 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
642 return 0; in xe_bo_move_notify()
650 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move() local
660 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) && in xe_bo_move()
662 int ret = 0; in xe_bo_move()
727 if (timeout < 0) { in xe_bo_move()
742 hop->fpfn = 0; in xe_bo_move()
743 hop->lpfn = 0; in xe_bo_move()
753 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
755 migrate = mem_type_to_migrate(xe, old_mem_type); in xe_bo_move()
757 migrate = xe->tiles[0].migrate; in xe_bo_move()
759 xe_assert(xe, migrate); in xe_bo_move()
761 if (xe_rpm_reclaim_safe(xe)) { in xe_bo_move()
766 xe_pm_runtime_get(xe); in xe_bo_move()
768 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
769 xe_pm_runtime_get_noresume(xe); in xe_bo_move()
794 xe_pm_runtime_put(xe); in xe_bo_move()
798 xe_assert(xe, new_mem->start == in xe_bo_move()
806 u32 flags = 0; in xe_bo_move()
820 xe_pm_runtime_put(xe); in xe_bo_move()
829 ret = 0; in xe_bo_move()
845 xe_pm_runtime_put(xe); in xe_bo_move()
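
Within the xe_bo_move() hits, mem_type_to_migrate() picks which tile's migration context performs the copy: the destination VRAM region wins, then the source, and otherwise tile 0, with the blit bracketed by runtime-PM get/put calls. A simplified sketch of that selection only (hypothetical types and a two-tile layout assumed, not the driver's code):

	#include <stdbool.h>

	struct migrate { int tile_id; };

	struct dev {
		struct migrate tiles[2];
	};

	static bool is_vram(int mem_type)
	{
		return mem_type >= 2;	/* types 2 and 3 stand in for the VRAM regions */
	}

	static struct migrate *migrate_for_type(struct dev *d, int mem_type)
	{
		return &d->tiles[mem_type - 2];	/* VRAM type N maps to tile N - 2 here */
	}

	static struct migrate *pick_migrate(struct dev *d, int old_type, int new_type)
	{
		if (is_vram(new_type))
			return migrate_for_type(d, new_type);	/* destination first */
		if (is_vram(old_type))
			return migrate_for_type(d, old_type);	/* then the source */
		return &d->tiles[0];	/* system-to-system: fall back to tile 0 */
	}

	int main(void)
	{
		struct dev d = { { { 0 }, { 1 } } };

		return pick_migrate(&d, 1, 3)->tile_id == 1 ? 0 : 1;
	}
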
864 * Return: 0 on success. Negative error code on failure.
890 return 0; in xe_bo_evict_pinned()
897 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); in xe_bo_evict_pinned()
916 return 0; in xe_bo_evict_pinned()
932 * Return: 0 on success. Negative error code on failure.
940 struct ttm_place *place = &bo->placements[0]; in xe_bo_restore_pinned()
958 return 0; in xe_bo_restore_pinned()
976 return 0; in xe_bo_restore_pinned()
994 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
1006 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor() local
1009 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1020 xe_assert(xe, locked); in xe_ttm_bo_lock_in_destructor()
1109 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy() local
1115 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1128 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1131 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1173 struct xe_device *xe = to_xe_device(ddev); in xe_gem_fault() local
1180 xe_pm_runtime_get(xe); in xe_gem_fault()
1202 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1204 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1205 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1211 xe_pm_runtime_put(xe); in xe_gem_fault()
1265 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, in ___xe_bo_create_locked() argument
1281 xe_assert(xe, !tile || type == ttm_bo_type_kernel); in ___xe_bo_create_locked()
1290 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1327 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1335 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1346 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1375 if (timeout < 0) { in ___xe_bo_create_locked()
1392 static int __xe_bo_fixed_placement(struct xe_device *xe, in __xe_bo_fixed_placement() argument
1418 /* 0 or multiple of the above set */ in __xe_bo_fixed_placement()
1427 return 0; in __xe_bo_fixed_placement()
1431 __xe_bo_create_locked(struct xe_device *xe, in __xe_bo_create_locked() argument
1442 if (start || end != ~0ULL) { in __xe_bo_create_locked()
1448 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size); in __xe_bo_create_locked()
1455 bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL, in __xe_bo_create_locked()
1476 tile = xe_device_get_root_tile(xe); in __xe_bo_create_locked()
1478 xe_assert(xe, tile); in __xe_bo_create_locked()
1500 xe_bo_create_locked_range(struct xe_device *xe, in xe_bo_create_locked_range() argument
1505 return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); in xe_bo_create_locked_range()
1508 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_locked() argument
1512 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); in xe_bo_create_locked()
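
The __xe_bo_fixed_placement() and __xe_bo_create_locked() hits cover BOs that must land at a fixed offset: a (start, end) byte range of (0, ~0ULL) means "anywhere", anything else is converted into page-frame bounds for the placement. A small sketch of that conversion, with a local page-shift constant standing in for the kernel's PAGE_SHIFT:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT_SW 12	/* local constant, not the kernel macro */

	struct range_place {
		uint32_t fpfn;	/* first allowed page frame */
		uint32_t lpfn;	/* last allowed page frame; 0 = unbounded */
	};

	static struct range_place place_for_range(uint64_t start, uint64_t end)
	{
		struct range_place p = { 0, 0 };

		if (start || end != ~0ULL) {	/* (0, ~0ULL) means "anywhere" */
			p.fpfn = (uint32_t)(start >> PAGE_SHIFT_SW);
			p.lpfn = (uint32_t)(end >> PAGE_SHIFT_SW);
		}
		return p;
	}

	int main(void)
	{
		struct range_place p = place_for_range(0x10000, 0x20000);

		printf("fpfn=%u lpfn=%u\n", (unsigned)p.fpfn, (unsigned)p.lpfn);
		return 0;
	}
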
1515 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_user() argument
1520 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, in xe_bo_create_user()
1529 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create() argument
1533 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags); in xe_bo_create()
1541 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map_at() argument
1548 u64 start = offset == ~0ull ? 0 : offset; in xe_bo_create_pin_map_at()
1549 u64 end = offset == ~0ull ? offset : start + size; in xe_bo_create_pin_map_at()
1552 xe_ttm_stolen_cpu_access_needs_ggtt(xe)) in xe_bo_create_pin_map_at()
1555 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, in xe_bo_create_pin_map_at()
1580 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map() argument
1584 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags); in xe_bo_create_pin_map()
1587 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_from_data() argument
1591 struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL, in xe_bo_create_from_data()
1597 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
1607 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_pin_map() argument
1613 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags); in xe_managed_bo_create_pin_map()
1617 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
1624 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_from_data() argument
1627 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags); in xe_managed_bo_create_from_data()
1632 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
1639 * @xe: xe device
1648 * Returns 0 for success, negative error code otherwise.
1650 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src) in xe_managed_bo_reinit_in_vram() argument
1657 xe_assert(xe, IS_DGFX(xe)); in xe_managed_bo_reinit_in_vram()
1658 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
1660 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
1665 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
1668 return 0; in xe_managed_bo_reinit_in_vram()
1677 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset() local
1680 return xe_ttm_stolen_gpu_offset(xe); in vram_region_gpu_offset()
1693 * Returns 0 for success, negative error code otherwise.
1697 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin_external() local
1700 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
1701 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_pin_external()
1709 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
1711 &xe->pinned.external_vram); in xe_bo_pin_external()
1712 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
1724 return 0; in xe_bo_pin_external()
1729 struct ttm_place *place = &bo->placements[0]; in xe_bo_pin()
1730 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin() local
1734 xe_assert(xe, !xe_bo_is_user(bo)); in xe_bo_pin()
1737 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
1744 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
1747 xe_assert(xe, !xe_bo_is_pinned(bo)); in xe_bo_pin()
1758 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && in xe_bo_pin()
1761 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
1763 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - in xe_bo_pin()
1770 spin_lock(&xe->pinned.lock); in xe_bo_pin()
1771 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
1772 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
1783 return 0; in xe_bo_pin()
1794 * Returns 0 for success, negative error code otherwise.
1798 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin_external() local
1800 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
1801 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin_external()
1802 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_unpin_external()
1804 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
1807 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
1820 struct ttm_place *place = &bo->placements[0]; in xe_bo_unpin()
1821 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin() local
1823 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
1824 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin()
1827 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
1828 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
1830 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
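
The xe_bo_pin()/xe_bo_unpin() hits show pinned BOs being linked onto device-wide lists under xe->pinned.lock so they can be found again later (for example around suspend/resume). A userspace sketch of that bookkeeping, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel spinlock and list_head:

	#include <pthread.h>
	#include <stddef.h>

	struct pinned_obj {
		struct pinned_obj *next;
	};

	struct device_state {
		pthread_mutex_t lock;
		struct pinned_obj *pinned;	/* head of the pinned-object list */
	};

	static void track_pin(struct device_state *d, struct pinned_obj *o)
	{
		pthread_mutex_lock(&d->lock);
		o->next = d->pinned;
		d->pinned = o;
		pthread_mutex_unlock(&d->lock);
	}

	static void track_unpin(struct device_state *d, struct pinned_obj *o)
	{
		struct pinned_obj **pp;

		pthread_mutex_lock(&d->lock);
		for (pp = &d->pinned; *pp; pp = &(*pp)->next) {
			if (*pp == o) {
				*pp = o->next;	/* unlink in place */
				break;
			}
		}
		pthread_mutex_unlock(&d->lock);
	}

	int main(void)
	{
		struct device_state d = { PTHREAD_MUTEX_INITIALIZER, NULL };
		struct pinned_obj a = { NULL };

		track_pin(&d, &a);
		track_unpin(&d, &a);
		return d.pinned != NULL;
	}
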
1848 * Return: 0 on success, negative error code on failure. May return
1885 struct xe_device *xe = xe_bo_device(bo); in __xe_bo_addr() local
1889 xe_assert(xe, page_size <= PAGE_SIZE); in __xe_bo_addr()
1894 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
1927 return 0; in xe_bo_vmap()
1936 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
1946 return 0; in xe_bo_vmap()
1966 struct xe_device *xe = to_xe_device(dev); in xe_gem_create_ioctl() local
1975 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_create_ioctl()
1976 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
1977 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
1981 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
1985 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
1991 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
1994 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
1997 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
2000 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2003 bo_flags = 0; in xe_gem_create_ioctl()
2015 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2020 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK))) in xe_gem_create_ioctl()
2026 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2030 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK && in xe_gem_create_ioctl()
2034 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT && in xe_gem_create_ioctl()
2040 if (XE_IOCTL_DBG(xe, !vm)) in xe_gem_create_ioctl()
2047 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2083 struct xe_device *xe = to_xe_device(dev); in xe_gem_mmap_offset_ioctl() local
2087 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2088 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2091 if (XE_IOCTL_DBG(xe, args->flags)) in xe_gem_mmap_offset_ioctl()
2095 if (XE_IOCTL_DBG(xe, !gem_obj)) in xe_gem_mmap_offset_ioctl()
2102 return 0; in xe_gem_mmap_offset_ioctl()
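
The xe_gem_create_ioctl() and xe_gem_mmap_offset_ioctl() hits are dominated by XE_IOCTL_DBG() argument checks: reserved and pad fields must be zero, flag bits must be known, and sizes must be non-zero and page-aligned before anything is allocated. A compact sketch of that validation style, with illustrative field names and flag values only:

	#include <errno.h>
	#include <stdint.h>

	#define MY_PAGE_SIZE 4096u
	#define KNOWN_FLAGS  0x7u

	struct create_args {
		uint64_t size;
		uint32_t flags;
		uint32_t reserved[2];
	};

	static int validate_create(const struct create_args *a)
	{
		if (a->reserved[0] || a->reserved[1])
			return -EINVAL;		/* reserved fields must be zero */
		if (a->flags & ~KNOWN_FLAGS)
			return -EINVAL;		/* unknown flag bits */
		if (!a->size || (a->size & (MY_PAGE_SIZE - 1)))
			return -EINVAL;		/* zero or page-unaligned size */
		return 0;
	}

	int main(void)
	{
		struct create_args ok = { .size = 2 * MY_PAGE_SIZE, .flags = 1 };
		struct create_args bad = { .size = 100, .flags = 1 };

		return (validate_create(&ok) == 0 && validate_create(&bad) == -EINVAL) ? 0 : 1;
	}
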
2113 * Return: 0 on success, -EINTR if @intr is true and the wait for a
2115 * function always returns 0.
2124 return 0; in xe_bo_lock()
2163 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
2174 memset(place, 0, sizeof(*place)); in xe_place_from_ttm_type()
2190 * Return: 0 on success. Negative error code on failure. In particular may
2195 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate() local
2206 return 0; in xe_bo_migrate()
2222 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2225 u32 c = 0; in xe_bo_migrate()
2227 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2241 * Return: 0 on success. Negative error code on failure.
2261 return 0; in xe_bo_evict()
2273 struct xe_device *xe = xe_bo_device(bo); in xe_bo_needs_ccs_pages() local
2275 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) in xe_bo_needs_ccs_pages()
2278 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2286 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
2354 struct xe_device *xe = to_xe_device(dev); in xe_bo_dumb_create() local
2360 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2366 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2368 XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | in xe_bo_dumb_create()
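
The xe_bo_dumb_create() hits round the requested size up to the platform's minimum VRAM page size (SZ_64K when XE_VRAM_FLAGS_NEED64K is set, SZ_4K otherwise) before creating the user BO. A minimal sketch of that rounding, assuming a local ALIGN_UP helper rather than the kernel macro:

	#include <stdint.h>
	#include <stdio.h>

	/* ALIGN_UP is a local helper here, valid for power-of-two alignments. */
	#define ALIGN_UP(x, a) (((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

	static uint64_t dumb_bo_size(uint64_t requested, int need_64k_pages)
	{
		uint64_t min_page = need_64k_pages ? (64u << 10) : (4u << 10);

		return ALIGN_UP(requested, min_page);
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)dumb_bo_size(100000, 1));
		return 0;
	}
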