Lines Matching +full:row +full:- +full:hold

26 #include <linux/dma-fence-array.h>
55 if (kref_read(&vma->vm->ref)) in assert_vma_held_evict()
56 assert_object_held_shared(vma->obj); in assert_vma_held_evict()
79 if (!vma->node.stack) { in vma_print_allocator()
80 drm_dbg(vma->obj->base.dev, in vma_print_allocator()
82 vma->node.start, vma->node.size, reason); in vma_print_allocator()
86 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); in vma_print_allocator()
87 drm_dbg(vma->obj->base.dev, in vma_print_allocator()
89 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
110 return -ENOENT; in __i915_vma_active()
124 intel_gt_pm_get_untracked(vma->vm->gt); in __i915_vma_active()
139 intel_gt_pm_put_async_untracked(vma->vm->gt); in __i915_vma_retire()
150 struct i915_vma *pos = ERR_PTR(-E2BIG); in vma_create()
156 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); in vma_create()
160 return ERR_PTR(-ENOMEM); in vma_create()
162 vma->ops = &vm->vma_ops; in vma_create()
163 vma->obj = obj; in vma_create()
164 vma->size = obj->base.size; in vma_create()
165 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; in vma_create()
167 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0); in vma_create()
172 might_lock(&vma->active.mutex); in vma_create()
176 INIT_LIST_HEAD(&vma->closed_link); in vma_create()
177 INIT_LIST_HEAD(&vma->obj_link); in vma_create()
178 RB_CLEAR_NODE(&vma->obj_node); in vma_create()
180 if (view && view->type != I915_GTT_VIEW_NORMAL) { in vma_create()
181 vma->gtt_view = *view; in vma_create()
182 if (view->type == I915_GTT_VIEW_PARTIAL) { in vma_create()
184 view->partial.offset, in vma_create()
185 view->partial.size, in vma_create()
186 obj->base.size >> PAGE_SHIFT)); in vma_create()
187 vma->size = view->partial.size; in vma_create()
188 vma->size <<= PAGE_SHIFT; in vma_create()
189 GEM_BUG_ON(vma->size > obj->base.size); in vma_create()
190 } else if (view->type == I915_GTT_VIEW_ROTATED) { in vma_create()
191 vma->size = intel_rotation_info_size(&view->rotated); in vma_create()
192 vma->size <<= PAGE_SHIFT; in vma_create()
193 } else if (view->type == I915_GTT_VIEW_REMAPPED) { in vma_create()
194 vma->size = intel_remapped_info_size(&view->remapped); in vma_create()
195 vma->size <<= PAGE_SHIFT; in vma_create()
199 if (unlikely(vma->size > vm->total)) in vma_create()
202 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); in vma_create()
204 err = mutex_lock_interruptible(&vm->mutex); in vma_create()
210 vma->vm = vm; in vma_create()
211 list_add_tail(&vma->vm_link, &vm->unbound_list); in vma_create()
213 spin_lock(&obj->vma.lock); in vma_create()
215 if (unlikely(overflows_type(vma->size, u32))) in vma_create()
218 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, in vma_create()
221 if (unlikely(vma->fence_size < vma->size || /* overflow */ in vma_create()
222 vma->fence_size > vm->total)) in vma_create()
225 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); in vma_create()
227 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, in vma_create()
230 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); in vma_create()
236 p = &obj->vma.tree.rb_node; in vma_create()
250 p = &rb->rb_right; in vma_create()
252 p = &rb->rb_left; in vma_create()
256 rb_link_node(&vma->obj_node, rb, p); in vma_create()
257 rb_insert_color(&vma->obj_node, &obj->vma.tree); in vma_create()
261 * We put the GGTT vma at the start of the vma-list, followed in vma_create()
266 list_add(&vma->obj_link, &obj->vma.list); in vma_create()
268 list_add_tail(&vma->obj_link, &obj->vma.list); in vma_create()
270 spin_unlock(&obj->vma.lock); in vma_create()
271 mutex_unlock(&vm->mutex); in vma_create()
276 spin_unlock(&obj->vma.lock); in vma_create()
277 list_del_init(&vma->vm_link); in vma_create()
278 mutex_unlock(&vm->mutex); in vma_create()
291 rb = obj->vma.tree.rb_node; in i915_vma_lookup()
301 rb = rb->rb_right; in i915_vma_lookup()
303 rb = rb->rb_left; in i915_vma_lookup()
310 * i915_vma_instance - return the singleton instance of the VMA
330 GEM_BUG_ON(!kref_read(&vm->ref)); in i915_vma_instance()
332 spin_lock(&obj->vma.lock); in i915_vma_instance()
334 spin_unlock(&obj->vma.lock); in i915_vma_instance()
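A hedged usage sketch (not part of the listed file): how a caller typically obtains the singleton VMA for an object in an address space and pins it into the GGTT. i915_vma_instance(), i915_ggtt_pin(), i915_vma_unpin() and PIN_HIGH are real names from this driver; the error handling and choice of flags are illustrative only.

	/* Look up (or create) the singleton vma for obj in the GGTT.  The same
	 * vma is returned for the same (obj, vm, view) tuple, so repeated
	 * lookups do not allocate. */
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);	/* NULL: normal view */
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);	/* takes the object lock itself */
	if (err)
		return err;

	/* ... use the binding, e.g. i915_ggtt_offset(vma) ... */

	i915_vma_unpin(vma);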
358 struct i915_vma_resource *vma_res = vw->vma_res; in __vma_bind()
366 if (i915_gem_object_has_unknown_state(vw->obj)) in __vma_bind()
369 vma_res->ops->bind_vma(vma_res->vm, &vw->stash, in __vma_bind()
370 vma_res, vw->pat_index, vw->flags); in __vma_bind()
377 if (vw->obj) in __vma_release()
378 i915_gem_object_put(vw->obj); in __vma_release()
380 i915_vm_free_pt_stash(vw->vm, &vw->stash); in __vma_release()
381 if (vw->vma_res) in __vma_release()
382 i915_vma_resource_put(vw->vma_res); in __vma_release()
399 dma_fence_work_init(&vw->base, &bind_ops); in i915_vma_work()
400 vw->base.dma.error = -EAGAIN; /* disable the worker by default */ in i915_vma_work()
409 if (rcu_access_pointer(vma->active.excl.fence)) { in i915_vma_wait_for_bind()
413 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); in i915_vma_wait_for_bind()
427 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl); in i915_vma_verify_bind_complete()
434 err = fence->error; in i915_vma_verify_bind_complete()
436 err = -EBUSY; in i915_vma_verify_bind_complete()
450 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_resource_init_from_vma()
452 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes, in i915_vma_resource_init_from_vma()
453 obj->mm.rsgt, i915_gem_object_is_readonly(obj), in i915_vma_resource_init_from_vma()
454 i915_gem_object_is_lmem(obj), obj->mm.region, in i915_vma_resource_init_from_vma()
455 vma->ops, vma->private, __i915_vma_offset(vma), in i915_vma_resource_init_from_vma()
456 __i915_vma_size(vma), vma->size, vma->guard); in i915_vma_resource_init_from_vma()
460 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
468 * DMA addresses are taken from the scatter-gather table of this object (or of
469 * this VMA in case of non-default GGTT views) and PTE entries set up.
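i915_vma_bind() is an internal step; as a hedged sketch, the usual way to reach it is through the pin interface under the ww object lock, which inserts the node and then binds the PTEs (asynchronously when the vm requests it). The names below are from this driver; the loop body is abbreviated.

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;	/* backoff/retry handled by the ww loop */

		/* i915_vma_pin_ww() -> i915_vma_insert() + i915_vma_bind() */
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	}
	if (err)
		return err;

	/* ... the bind may still be queued; i915_vma_wait_for_bind() waits ... */
	i915_vma_unpin(vma);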
482 lockdep_assert_held(&vma->vm->mutex); in i915_vma_bind()
483 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_bind()
484 GEM_BUG_ON(vma->size > i915_vma_size(vma)); in i915_vma_bind()
486 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, in i915_vma_bind()
487 vma->node.size, in i915_vma_bind()
488 vma->vm->total))) { in i915_vma_bind()
490 return -ENODEV; in i915_vma_bind()
495 return -EINVAL; in i915_vma_bind()
501 vma_flags = atomic_read(&vma->flags); in i915_vma_bind()
510 GEM_BUG_ON(!atomic_read(&vma->pages_count)); in i915_vma_bind()
513 if (work && bind_flags & vma->vm->bind_async_flags) in i915_vma_bind()
514 ret = i915_vma_resource_bind_dep_await(vma->vm, in i915_vma_bind()
515 &work->base.chain, in i915_vma_bind()
516 vma->node.start, in i915_vma_bind()
517 vma->node.size, in i915_vma_bind()
523 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start, in i915_vma_bind()
524 vma->node.size, true); in i915_vma_bind()
530 if (vma->resource || !vma_res) { in i915_vma_bind()
536 vma->resource = vma_res; in i915_vma_bind()
539 if (work && bind_flags & vma->vm->bind_async_flags) { in i915_vma_bind()
542 work->vma_res = i915_vma_resource_get(vma->resource); in i915_vma_bind()
543 work->pat_index = pat_index; in i915_vma_bind()
544 work->flags = bind_flags; in i915_vma_bind()
552 * part of the obj->resv->excl_fence as it only affects in i915_vma_bind()
555 prev = i915_active_set_exclusive(&vma->active, &work->base.dma); in i915_vma_bind()
557 __i915_sw_fence_await_dma_fence(&work->base.chain, in i915_vma_bind()
559 &work->cb); in i915_vma_bind()
563 work->base.dma.error = 0; /* enable the queue_work() */ in i915_vma_bind()
564 work->obj = i915_gem_object_get(vma->obj); in i915_vma_bind()
566 ret = i915_gem_object_wait_moving_fence(vma->obj, true); in i915_vma_bind()
568 i915_vma_resource_free(vma->resource); in i915_vma_bind()
569 vma->resource = NULL; in i915_vma_bind()
573 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index, in i915_vma_bind()
577 atomic_or(bind_flags, &vma->flags); in i915_vma_bind()
586 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY)) in i915_vma_pin_iomap()
587 return IOMEM_ERR_PTR(-EINVAL); in i915_vma_pin_iomap()
593 ptr = READ_ONCE(vma->iomap); in i915_vma_pin_iomap()
597 * instead, which already supports mapping non-contiguous chunks in i915_vma_pin_iomap()
601 if (i915_gem_object_is_lmem(vma->obj)) { in i915_vma_pin_iomap()
602 ptr = i915_gem_object_lmem_io_map(vma->obj, 0, in i915_vma_pin_iomap()
603 vma->obj->base.size); in i915_vma_pin_iomap()
605 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, in i915_vma_pin_iomap()
610 i915_gem_object_pin_map(vma->obj, I915_MAP_WC); in i915_vma_pin_iomap()
619 err = -ENOMEM; in i915_vma_pin_iomap()
623 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { in i915_vma_pin_iomap()
625 __i915_gem_object_release_map(vma->obj); in i915_vma_pin_iomap()
628 ptr = vma->iomap; in i915_vma_pin_iomap()
652 intel_gt_flush_ggtt_writes(vma->vm->gt); in i915_vma_flush_writes()
657 GEM_BUG_ON(vma->iomap == NULL); in i915_vma_unpin_iomap()
676 obj = vma->obj; in i915_vma_unpin_and_release()
690 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_misplaced()
715 vma->guard < (flags & PIN_OFFSET_MASK)) in i915_vma_misplaced()
726 GEM_BUG_ON(!vma->fence_size); in __i915_vma_set_map_and_fenceable()
728 fenceable = (i915_vma_size(vma) >= vma->fence_size && in __i915_vma_set_map_and_fenceable()
729 IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment)); in __i915_vma_set_map_and_fenceable()
731 mappable = i915_ggtt_offset(vma) + vma->fence_size <= in __i915_vma_set_map_and_fenceable()
732 i915_vm_to_ggtt(vma->vm)->mappable_end; in __i915_vma_set_map_and_fenceable()
742 struct drm_mm_node *node = &vma->node; in i915_gem_valid_gtt_space()
752 if (!i915_vm_has_cache_coloring(vma->vm)) in i915_gem_valid_gtt_space()
757 GEM_BUG_ON(list_empty(&node->node_list)); in i915_gem_valid_gtt_space()
773 * i915_vma_insert - finds a slot for the vma in its address space
796 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
799 size = max(size, vma->size); in i915_vma_insert()
800 alignment = max_t(typeof(alignment), alignment, vma->display_alignment); in i915_vma_insert()
802 size = max_t(typeof(size), size, vma->fence_size); in i915_vma_insert()
804 alignment, vma->fence_alignment); in i915_vma_insert()
811 guard = vma->guard; /* retain guard across rebinds */ in i915_vma_insert()
826 end = vma->vm->total; in i915_vma_insert()
828 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); in i915_vma_insert()
830 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); in i915_vma_insert()
833 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj)); in i915_vma_insert()
840 if (size > end - 2 * guard) { in i915_vma_insert()
841 drm_dbg(vma->obj->base.dev, in i915_vma_insert()
844 return -ENOSPC; in i915_vma_insert()
849 if (i915_vm_has_cache_coloring(vma->vm)) in i915_vma_insert()
850 color = vma->obj->pat_index; in i915_vma_insert()
856 return -EINVAL; in i915_vma_insert()
861 * of the vma->node due to the guard pages. in i915_vma_insert()
863 if (offset < guard || offset + size > end - guard) in i915_vma_insert()
864 return -ENOSPC; in i915_vma_insert()
866 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node, in i915_vma_insert()
868 offset - guard, in i915_vma_insert()
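A worked example with invented numbers for the fixed-offset path above, assuming the reserved node is widened by one guard region on each side (the reserve call starts at offset - guard and, in the full source, the size grows by two guards to match):

	/*
	 *	guard  = SZ_64K                          (0x10000)
	 *	offset = 0x01000000, size = 0x00200000   <- caller's request
	 *
	 *	node.start = offset - guard     = 0x00ff0000
	 *	node.size  = size + 2 * guard   = 0x00220000
	 *
	 * The range reported by i915_vma_offset()/i915_vma_size() is the inner
	 * [0x01000000, 0x01200000), which is why the check above rejects
	 * offset < guard and offset + size > end - guard.
	 */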
882 if (upper_32_bits(end - 1) && in i915_vma_insert()
883 vma->page_sizes.sg > I915_GTT_PAGE_SIZE && in i915_vma_insert()
884 !HAS_64K_PAGES(vma->vm->i915)) { in i915_vma_insert()
886 * We can't mix 64K and 4K PTEs in the same page-table in i915_vma_insert()
892 rounddown_pow_of_two(vma->page_sizes.sg | in i915_vma_insert()
898 * also checks that we exclude the aliasing-ppgtt. in i915_vma_insert()
904 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) in i915_vma_insert()
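A worked example of the 2M alignment above, with an invented page-size mask:

	/*
	 * Suppose the object is backed by 64K and 4K pages, so
	 * vma->page_sizes.sg == 0x11000 (64K | 4K).  Then
	 *
	 *	rounddown_pow_of_two(0x11000 | I915_GTT_PAGE_SIZE_2M)
	 *	    == rounddown_pow_of_two(0x211000) == 0x200000 (2M)
	 *
	 * so the vma is aligned to a 2M block and, because 64K pages are
	 * present, its size is rounded up to 2M as well, keeping 64K and 4K
	 * PTEs out of the same page-table.
	 */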
908 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node, in i915_vma_insert()
914 GEM_BUG_ON(vma->node.start < start); in i915_vma_insert()
915 GEM_BUG_ON(vma->node.start + vma->node.size > end); in i915_vma_insert()
917 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
920 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_insert()
921 vma->guard = guard; in i915_vma_insert()
929 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_detach()
934 * vma, we can drop its hold on the backing storage and allow in i915_vma_detach()
937 list_move_tail(&vma->vm_link, &vma->vm->unbound_list); in i915_vma_detach()
944 bound = atomic_read(&vma->flags); in try_qad_pin()
962 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); in try_qad_pin()
973 unsigned int column, row; in rotate_pages() local
979 src_idx = src_stride * (height - 1) + column + offset; in rotate_pages()
980 for (row = 0; row < height; row++) { in rotate_pages()
981 st->nents++; in rotate_pages()
992 src_idx -= src_stride; in rotate_pages()
995 left = (dst_stride - height) * I915_GTT_PAGE_SIZE; in rotate_pages()
1000 st->nents++; in rotate_pages()
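To make the index arithmetic above concrete, a worked example with an invented 2x3 tile plane; this only restates the listed loop, not additional driver behaviour:

	/*
	 * width = 2, height = 3, src_stride = 2, offset = 0:
	 *
	 *	column 0: src_idx = 2 * (3 - 1) + 0 = 4, then 2, then 0
	 *	column 1: src_idx = 2 * (3 - 1) + 1 = 5, then 3, then 1
	 *
	 * Each source column is emitted bottom-up into the destination, i.e.
	 * GTT page order 4,2,0,5,3,1, which yields the rotated view, with
	 * (dst_stride - height) padding pages appended after each column.
	 */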
1021 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_rotate_pages()
1024 int ret = -ENOMEM; in intel_rotate_pages()
1036 st->nents = 0; in intel_rotate_pages()
1037 sg = st->sgl; in intel_rotate_pages()
1039 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) in intel_rotate_pages()
1040 sg = rotate_pages(obj, rot_info->plane[i].offset, in intel_rotate_pages()
1041 rot_info->plane[i].width, rot_info->plane[i].height, in intel_rotate_pages()
1042 rot_info->plane[i].src_stride, in intel_rotate_pages()
1043 rot_info->plane[i].dst_stride, in intel_rotate_pages()
1052 …drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)… in intel_rotate_pages()
1053 obj->base.size, rot_info->plane[0].width, in intel_rotate_pages()
1054 rot_info->plane[0].height, size); in intel_rotate_pages()
1063 st->nents++; in add_padding_pages()
1086 unsigned int row; in remap_tiled_color_plane_pages() local
1094 for (row = 0; row < height; row++) { in remap_tiled_color_plane_pages()
1111 st->nents++; in remap_tiled_color_plane_pages()
1119 left -= length; in remap_tiled_color_plane_pages()
1122 offset += src_stride - width; in remap_tiled_color_plane_pages()
1124 left = (dst_stride - width) * I915_GTT_PAGE_SIZE; in remap_tiled_color_plane_pages()
1152 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), in remap_contiguous_pages()
1159 st->nents++; in remap_contiguous_pages()
1160 count -= len >> PAGE_SHIFT; in remap_contiguous_pages()
1200 if (rem_info->plane_alignment) in remap_color_plane_pages()
1201 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; in remap_color_plane_pages()
1203 if (rem_info->plane[color_plane].linear) in remap_color_plane_pages()
1205 rem_info->plane[color_plane].offset, in remap_color_plane_pages()
1207 rem_info->plane[color_plane].size, in remap_color_plane_pages()
1213 rem_info->plane[color_plane].offset, in remap_color_plane_pages()
1215 rem_info->plane[color_plane].width, in remap_color_plane_pages()
1216 rem_info->plane[color_plane].height, in remap_color_plane_pages()
1217 rem_info->plane[color_plane].src_stride, in remap_color_plane_pages()
1218 rem_info->plane[color_plane].dst_stride, in remap_color_plane_pages()
1230 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_remap_pages()
1234 int ret = -ENOMEM; in intel_remap_pages()
1246 st->nents = 0; in intel_remap_pages()
1247 sg = st->sgl; in intel_remap_pages()
1249 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) in intel_remap_pages()
1260 …drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages… in intel_remap_pages()
1261 obj->base.size, rem_info->plane[0].width, in intel_remap_pages()
1262 rem_info->plane[0].height, size); in intel_remap_pages()
1273 unsigned int count = view->partial.size; in intel_partial_pages()
1274 int ret = -ENOMEM; in intel_partial_pages()
1284 st->nents = 0; in intel_partial_pages()
1286 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); in intel_partial_pages()
1305 * The vma->pages are only valid within the lifespan of the borrowed in __i915_vma_get_pages()
1306 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so in __i915_vma_get_pages()
1307 * must be the vma->pages. A simple rule is that vma->pages must only in __i915_vma_get_pages()
1308 * be accessed when the obj->mm.pages are pinned. in __i915_vma_get_pages()
1310 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); in __i915_vma_get_pages()
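A hedged sketch of the rule stated above from the caller's side (the pin helpers are real i915_gem interfaces; the body is illustrative):

	int err;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	/* vma->pages (and the sg_table it borrows from obj->mm.pages) is now
	 * stable and may be walked, e.g. with for_each_sgt_daddr(). */

	i915_gem_object_unpin_pages(vma->obj);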
1312 switch (vma->gtt_view.type) { in __i915_vma_get_pages()
1314 GEM_BUG_ON(vma->gtt_view.type); in __i915_vma_get_pages()
1317 pages = vma->obj->mm.pages; in __i915_vma_get_pages()
1322 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj); in __i915_vma_get_pages()
1327 intel_remap_pages(&vma->gtt_view.remapped, vma->obj); in __i915_vma_get_pages()
1331 pages = intel_partial_pages(&vma->gtt_view, vma->obj); in __i915_vma_get_pages()
1336 drm_err(&vma->vm->i915->drm, in __i915_vma_get_pages()
1338 vma->gtt_view.type, PTR_ERR(pages)); in __i915_vma_get_pages()
1342 vma->pages = pages; in __i915_vma_get_pages()
1351 if (atomic_add_unless(&vma->pages_count, 1, 0)) in i915_vma_get_pages()
1354 err = i915_gem_object_pin_pages(vma->obj); in i915_vma_get_pages()
1362 vma->page_sizes = vma->obj->mm.page_sizes; in i915_vma_get_pages()
1363 atomic_inc(&vma->pages_count); in i915_vma_get_pages()
1368 __i915_gem_object_unpin_pages(vma->obj); in i915_vma_get_pages()
1389 for_each_gt(gt, vm->i915, id) in vma_invalidate_tlb()
1397 GEM_BUG_ON(atomic_read(&vma->pages_count) < count); in __vma_put_pages()
1399 if (atomic_sub_return(count, &vma->pages_count) == 0) { in __vma_put_pages()
1400 if (vma->pages != vma->obj->mm.pages) { in __vma_put_pages()
1401 sg_free_table(vma->pages); in __vma_put_pages()
1402 kfree(vma->pages); in __vma_put_pages()
1404 vma->pages = NULL; in __vma_put_pages()
1406 i915_gem_object_unpin_pages(vma->obj); in __vma_put_pages()
1412 if (atomic_add_unless(&vma->pages_count, -1, 1)) in i915_vma_put_pages()
1422 lockdep_assert_held(&vma->vm->mutex); in vma_unbind_pages()
1425 count = atomic_read(&vma->pages_count); in vma_unbind_pages()
1459 * In case of a global GTT, we must hold a runtime-pm wakeref in i915_vma_pin_ww()
1460 * while global PTEs are updated. In other cases, we hold in i915_vma_pin_ww()
1463 * vm->mutex, get the first rpm wakeref outside of the mutex. in i915_vma_pin_ww()
1465 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); in i915_vma_pin_ww()
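A hedged sketch of the ordering the comment above prescribes: acquire the runtime-pm wakeref before vm->mutex and release it only after the mutex is dropped, so a wakeref is never taken while the mutex is held. The names are real; the body is illustrative.

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (mutex_lock_interruptible(&vm->mutex) == 0) {
		/* ... update (G)GTT PTEs while both are held ... */
		mutex_unlock(&vm->mutex);
	}

	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);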
1467 if (flags & vma->vm->bind_async_flags) { in i915_vma_pin_ww()
1469 err = i915_vm_lock_objects(vma->vm, ww); in i915_vma_pin_ww()
1475 err = -ENOMEM; in i915_vma_pin_ww()
1479 work->vm = vma->vm; in i915_vma_pin_ww()
1481 err = i915_gem_object_get_moving_fence(vma->obj, &moving); in i915_vma_pin_ww()
1485 dma_fence_work_chain(&work->base, moving); in i915_vma_pin_ww()
1488 if (vma->vm->allocate_va_range) { in i915_vma_pin_ww()
1489 err = i915_vm_alloc_pt_stash(vma->vm, in i915_vma_pin_ww()
1490 &work->stash, in i915_vma_pin_ww()
1491 vma->size); in i915_vma_pin_ww()
1495 err = i915_vm_map_pt_stash(vma->vm, &work->stash); in i915_vma_pin_ww()
1508 * Differentiate between user/kernel vma inside the aliasing-ppgtt. in i915_vma_pin_ww()
1511 * aliasing-ppgtt, but it is still vitally important to try and in i915_vma_pin_ww()
1514 * inversions when we have to evict them in the mmu_notifier callbacks - in i915_vma_pin_ww()
1520 * NB this may cause us to mask real lock inversions -- while the in i915_vma_pin_ww()
1524 err = mutex_lock_interruptible_nested(&vma->vm->mutex, in i915_vma_pin_ww()
1529 /* No more allocations allowed now we hold vm->mutex */ in i915_vma_pin_ww()
1532 err = -ENOENT; in i915_vma_pin_ww()
1536 bound = atomic_read(&vma->flags); in i915_vma_pin_ww()
1538 err = -ENOMEM; in i915_vma_pin_ww()
1543 err = -EAGAIN; /* pins are meant to be fairly temporary */ in i915_vma_pin_ww()
1553 err = i915_active_acquire(&vma->active); in i915_vma_pin_ww()
1562 if (i915_is_ggtt(vma->vm)) in i915_vma_pin_ww()
1566 GEM_BUG_ON(!vma->pages); in i915_vma_pin_ww()
1568 vma->obj->pat_index, in i915_vma_pin_ww()
1576 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); in i915_vma_pin_ww()
1577 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_pin_ww()
1589 drm_mm_remove_node(&vma->node); in i915_vma_pin_ww()
1592 i915_active_release(&vma->active); in i915_vma_pin_ww()
1594 mutex_unlock(&vma->vm->mutex); in i915_vma_pin_ww()
1599 dma_fence_work_commit_imm(&work->base); in i915_vma_pin_ww()
1601 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); in i915_vma_pin_ww()
1624 struct i915_address_space *vm = vma->vm; in __i915_ggtt_pin()
1632 if (err != -ENOSPC) { in __i915_ggtt_pin()
1642 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in __i915_ggtt_pin()
1644 if (mutex_lock_interruptible(&vm->mutex) == 0) { in __i915_ggtt_pin()
1651 mutex_unlock(&vm->mutex); in __i915_ggtt_pin()
1667 lockdep_assert_not_held(&vma->obj->base.resv->lock.base); in i915_ggtt_pin()
1670 err = i915_gem_object_lock(vma->obj, &_ww); in i915_ggtt_pin()
1679 * i915_ggtt_clear_scanout - Clear the scanout flag for all of the object's GGTT vmas
1690 spin_lock(&obj->vma.lock); in i915_ggtt_clear_scanout()
1693 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; in i915_ggtt_clear_scanout()
1695 spin_unlock(&obj->vma.lock); in i915_ggtt_clear_scanout()
1713 list_add(&vma->closed_link, &gt->closed_vma); in __vma_close()
1718 struct intel_gt *gt = vma->vm->gt; in i915_vma_close()
1724 GEM_BUG_ON(!atomic_read(&vma->open_count)); in i915_vma_close()
1725 if (atomic_dec_and_lock_irqsave(&vma->open_count, in i915_vma_close()
1726 &gt->closed_lock, in i915_vma_close()
1729 spin_unlock_irqrestore(&gt->closed_lock, flags); in i915_vma_close()
1735 list_del_init(&vma->closed_link); in __i915_vma_remove_closed()
1740 struct intel_gt *gt = vma->vm->gt; in i915_vma_reopen()
1742 spin_lock_irq(&gt->closed_lock); in i915_vma_reopen()
1745 spin_unlock_irq(&gt->closed_lock); in i915_vma_reopen()
1750 if (!drm_mm_node_allocated(&vma->node)) in force_unbind()
1753 atomic_and(~I915_VMA_PIN_MASK, &vma->flags); in force_unbind()
1755 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in force_unbind()
1761 struct drm_i915_gem_object *obj = vma->obj; in release_references()
1765 spin_lock(&obj->vma.lock); in release_references()
1766 list_del(&vma->obj_link); in release_references()
1767 if (!RB_EMPTY_NODE(&vma->obj_node)) in release_references()
1768 rb_erase(&vma->obj_node, &obj->vma.tree); in release_references()
1770 spin_unlock(&obj->vma.lock); in release_references()
1772 spin_lock_irq(&gt->closed_lock); in release_references()
1774 spin_unlock_irq(&gt->closed_lock); in release_references()
1777 i915_vm_resv_put(vma->vm); in release_references()
1779 i915_active_fini(&vma->active); in release_references()
1780 GEM_WARN_ON(vma->resource); in release_references()
1785 * i915_vma_destroy_locked - Remove all weak references to the vma and put
1793 * - __i915_gem_object_pages_fini()
1794 * - __i915_vm_close() - Blocks the above function by taking a reference on
1796 * - __i915_vma_parked() - Blocks the above functions by taking a reference
1806 * - vm->mutex
1807 * - obj->vma.lock
1808 * - gt->closed_lock
1812 lockdep_assert_held(&vma->vm->mutex); in i915_vma_destroy_locked()
1815 list_del_init(&vma->vm_link); in i915_vma_destroy_locked()
1816 release_references(vma, vma->vm->gt, false); in i915_vma_destroy_locked()
1824 mutex_lock(&vma->vm->mutex); in i915_vma_destroy()
1826 list_del_init(&vma->vm_link); in i915_vma_destroy()
1827 vm_ddestroy = vma->vm_ddestroy; in i915_vma_destroy()
1828 vma->vm_ddestroy = false; in i915_vma_destroy()
1830 /* vma->vm may be freed when releasing vma->vm->mutex. */ in i915_vma_destroy()
1831 gt = vma->vm->gt; in i915_vma_destroy()
1832 mutex_unlock(&vma->vm->mutex); in i915_vma_destroy()
1841 spin_lock_irq(&gt->closed_lock); in i915_vma_parked()
1842 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) { in i915_vma_parked()
1843 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1844 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1848 if (!kref_get_unless_zero(&obj->base.refcount)) in i915_vma_parked()
1856 list_move(&vma->closed_link, &closed); in i915_vma_parked()
1858 spin_unlock_irq(&gt->closed_lock); in i915_vma_parked()
1862 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1863 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1866 INIT_LIST_HEAD(&vma->closed_link); in i915_vma_parked()
1871 spin_lock_irq(&gt->closed_lock); in i915_vma_parked()
1872 list_add(&vma->closed_link, &gt->closed_vma); in i915_vma_parked()
1873 spin_unlock_irq(&gt->closed_lock); in i915_vma_parked()
1885 if (vma->iomap == NULL) in __i915_vma_iounmap()
1888 if (page_unmask_bits(vma->iomap)) in __i915_vma_iounmap()
1889 __i915_gem_object_release_map(vma->obj); in __i915_vma_iounmap()
1891 io_mapping_unmap(vma->iomap); in __i915_vma_iounmap()
1892 vma->iomap = NULL; in __i915_vma_iounmap()
1904 GEM_BUG_ON(!vma->obj->userfault_count); in i915_vma_revoke_mmap()
1906 node = &vma->mmo->vma_node; in i915_vma_revoke_mmap()
1907 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT; in i915_vma_revoke_mmap()
1908 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, in i915_vma_revoke_mmap()
1910 vma->size, in i915_vma_revoke_mmap()
1914 if (!--vma->obj->userfault_count) in i915_vma_revoke_mmap()
1915 list_del(&vma->obj->userfault_link); in i915_vma_revoke_mmap()
1921 return __i915_request_await_exclusive(rq, &vma->active); in __i915_request_await_bind()
1933 return i915_active_add_request(&vma->active, rq); in __i915_vma_move_to_active()
1941 struct drm_i915_gem_object *obj = vma->obj; in _i915_vma_move_to_active()
1946 GEM_BUG_ON(!vma->pages); in _i915_vma_move_to_active()
1949 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE); in _i915_vma_move_to_active()
1967 err = dma_resv_reserve_fences(vma->obj->base.resv, idx); in _i915_vma_move_to_active()
1978 i915_active_add_request(&front->write, rq); in _i915_vma_move_to_active()
1990 obj->write_domain = I915_GEM_DOMAIN_RENDER; in _i915_vma_move_to_active()
1991 obj->read_domains = 0; in _i915_vma_move_to_active()
1994 obj->write_domain = 0; in _i915_vma_move_to_active()
1998 dma_resv_add_fence(vma->obj->base.resv, curr, usage); in _i915_vma_move_to_active()
2001 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) in _i915_vma_move_to_active()
2002 i915_active_add_request(&vma->fence->active, rq); in _i915_vma_move_to_active()
2004 obj->read_domains |= I915_GEM_GPU_DOMAINS; in _i915_vma_move_to_active()
2005 obj->mm.dirty = true; in _i915_vma_move_to_active()
2013 struct i915_vma_resource *vma_res = vma->resource; in __i915_vma_evict()
2025 * before the unbind, otherwise due to the non-strict nature of those in __i915_vma_evict()
2030 * bit from set-domain, as we mark all GGTT vma associated in __i915_vma_evict()
2032 * are currently unbinding this one -- so if this vma will be in __i915_vma_evict()
2046 GEM_BUG_ON(vma->fence); in __i915_vma_evict()
2050 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt); in __i915_vma_evict()
2053 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) && in __i915_vma_evict()
2054 kref_read(&vma->vm->ref); in __i915_vma_evict()
2055 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) || in __i915_vma_evict()
2056 vma->vm->skip_pte_rewrite; in __i915_vma_evict()
2061 vma->obj->mm.tlb); in __i915_vma_evict()
2065 vma->resource = NULL; in __i915_vma_evict()
2068 &vma->flags); in __i915_vma_evict()
2078 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb); in __i915_vma_evict()
2095 lockdep_assert_held(&vma->vm->mutex); in __i915_vma_unbind()
2098 if (!drm_mm_node_allocated(&vma->node)) in __i915_vma_unbind()
2103 return -EAGAIN; in __i915_vma_unbind()
2109 * a residual pin skipping the vm->mutex) to complete. in __i915_vma_unbind()
2118 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ in __i915_vma_unbind()
2126 lockdep_assert_held(&vma->vm->mutex); in __i915_vma_unbind_async()
2128 if (!drm_mm_node_allocated(&vma->node)) in __i915_vma_unbind_async()
2132 &vma->obj->mm.rsgt->table != vma->resource->bi.pages) in __i915_vma_unbind_async()
2133 return ERR_PTR(-EAGAIN); in __i915_vma_unbind_async()
2144 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active, in __i915_vma_unbind_async()
2147 return ERR_PTR(-EBUSY); in __i915_vma_unbind_async()
2152 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ in __i915_vma_unbind_async()
2159 struct i915_address_space *vm = vma->vm; in i915_vma_unbind()
2163 assert_object_held_shared(vma->obj); in i915_vma_unbind()
2170 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_unbind()
2175 return -EAGAIN; in i915_vma_unbind()
2180 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); in i915_vma_unbind()
2182 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); in i915_vma_unbind()
2187 mutex_unlock(&vm->mutex); in i915_vma_unbind()
2191 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); in i915_vma_unbind()
2197 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_unbind_async()
2198 struct i915_address_space *vm = vma->vm; in i915_vma_unbind_async()
2204 * We need the dma-resv lock since we add the in i915_vma_unbind_async()
2205 * unbind fence to the dma-resv object. in i915_vma_unbind_async()
2209 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_unbind_async()
2214 return -EAGAIN; in i915_vma_unbind_async()
2217 if (!obj->mm.rsgt) in i915_vma_unbind_async()
2218 return -EBUSY; in i915_vma_unbind_async()
2220 err = dma_resv_reserve_fences(obj->base.resv, 2); in i915_vma_unbind_async()
2222 return -EBUSY; in i915_vma_unbind_async()
2227 * kmalloc and it's in the dma-fence signalling critical path. in i915_vma_unbind_async()
2230 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); in i915_vma_unbind_async()
2232 if (trylock_vm && !mutex_trylock(&vm->mutex)) { in i915_vma_unbind_async()
2233 err = -EBUSY; in i915_vma_unbind_async()
2236 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref); in i915_vma_unbind_async()
2242 mutex_unlock(&vm->mutex); in i915_vma_unbind_async()
2248 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); in i915_vma_unbind_async()
2253 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); in i915_vma_unbind_async()
2261 i915_gem_object_lock(vma->obj, NULL); in i915_vma_unbind_unlocked()
2263 i915_gem_object_unlock(vma->obj); in i915_vma_unbind_unlocked()
2270 i915_gem_object_make_unshrinkable(vma->obj); in i915_vma_make_unshrinkable()
2276 i915_gem_object_make_shrinkable(vma->obj); in i915_vma_make_shrinkable()
2281 i915_gem_object_make_purgeable(vma->obj); in i915_vma_make_purgeable()
2297 return -ENOMEM; in i915_vma_module_init()