Lines matching +full:software +full:- +full:locked (i915 GEM object code, drivers/gpu/drm/i915/gem/i915_gem_object.c)
52 if (drm_WARN_ON(&i915->drm, level >= I915_MAX_CACHE_LEVEL)) in i915_gem_get_pat_index()
55 return INTEL_INFO(i915)->cachelevel_to_pat[level]; in i915_gem_get_pat_index()
66 if (obj->pat_set_by_user) in i915_gem_object_has_cache_level()
73 return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl); in i915_gem_object_has_cache_level()
83 obj->base.funcs = &i915_gem_object_funcs; in i915_gem_object_alloc()
104 spin_lock_init(&obj->vma.lock); in i915_gem_object_init()
105 INIT_LIST_HEAD(&obj->vma.list); in i915_gem_object_init()
107 INIT_LIST_HEAD(&obj->mm.link); in i915_gem_object_init()
110 INIT_LIST_HEAD(&obj->client_link); in i915_gem_object_init()
113 INIT_LIST_HEAD(&obj->lut_list); in i915_gem_object_init()
114 spin_lock_init(&obj->lut_lock); in i915_gem_object_init()
116 spin_lock_init(&obj->mmo.lock); in i915_gem_object_init()
117 obj->mmo.offsets = RB_ROOT; in i915_gem_object_init()
119 init_rcu_head(&obj->rcu); in i915_gem_object_init()
121 obj->ops = ops; in i915_gem_object_init()
123 obj->flags = flags; in i915_gem_object_init()
125 obj->mm.madv = I915_MADV_WILLNEED; in i915_gem_object_init()
126 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); in i915_gem_object_init()
127 mutex_init(&obj->mm.get_page.lock); in i915_gem_object_init()
128 INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN); in i915_gem_object_init()
129 mutex_init(&obj->mm.get_dma_page.lock); in i915_gem_object_init()
133 * __i915_gem_object_fini - Clean up a GEM object initialization
143 mutex_destroy(&obj->mm.get_page.lock); in __i915_gem_object_fini()
144 mutex_destroy(&obj->mm.get_dma_page.lock); in __i915_gem_object_fini()
145 dma_resv_fini(&obj->base._resv); in __i915_gem_object_fini()
149 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
157 struct drm_i915_private *i915 = to_i915(obj->base.dev); in i915_gem_object_set_cache_coherency()
159 obj->pat_index = i915_gem_get_pat_index(i915, cache_level); in i915_gem_object_set_cache_coherency()
162 obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | in i915_gem_object_set_cache_coherency()
165 obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; in i915_gem_object_set_cache_coherency()
167 obj->cache_coherent = 0; in i915_gem_object_set_cache_coherency()
169 obj->cache_dirty = in i915_gem_object_set_cache_coherency()
170 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && in i915_gem_object_set_cache_coherency()
175 * i915_gem_object_set_pat_index - set PAT index to be used in PTE encode
185 struct drm_i915_private *i915 = to_i915(obj->base.dev); in i915_gem_object_set_pat_index()
187 if (obj->pat_index == pat_index) in i915_gem_object_set_pat_index()
190 obj->pat_index = pat_index; in i915_gem_object_set_pat_index()
193 obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | in i915_gem_object_set_pat_index()
196 obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; in i915_gem_object_set_pat_index()
198 obj->cache_coherent = 0; in i915_gem_object_set_pat_index()
200 obj->cache_dirty = in i915_gem_object_set_pat_index()
201 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && in i915_gem_object_set_pat_index()
207 struct drm_i915_private *i915 = to_i915(obj->base.dev); in i915_gem_object_can_bypass_llc()
211 * about non-userspace objects being able to bypass the LLC. in i915_gem_object_can_bypass_llc()
213 if (!(obj->flags & I915_BO_ALLOC_USER)) in i915_gem_object_can_bypass_llc()
219 if (obj->pat_set_by_user) in i915_gem_object_can_bypass_llc()
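As a hedged illustration (not part of the listing) of how the caching helpers above are typically used: when a kernel-internal object is created, the backend picks a cache level once and lets i915_gem_object_set_cache_coherency() derive the PAT index and coherency flags shown above. HAS_LLC(), I915_CACHE_LLC and I915_CACHE_NONE are existing i915 identifiers; the wrapper function itself is hypothetical.

/*
 * Hypothetical sketch: choose the cache level for a freshly created
 * kernel-internal object. i915_gem_object_set_cache_coherency() then
 * derives obj->pat_index, obj->cache_coherent and obj->cache_dirty.
 */
static void example_setup_caching(struct drm_i915_private *i915,
				  struct drm_i915_gem_object *obj)
{
	/* LLC platforms can snoop CPU caches; everything else stays uncached. */
	if (HAS_LLC(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	else
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}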
240 struct drm_i915_file_private *fpriv = file->driver_priv; in i915_gem_close_object()
246 spin_lock(&obj->lut_lock); in i915_gem_close_object()
247 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { in i915_gem_close_object()
248 struct i915_gem_context *ctx = lut->ctx; in i915_gem_close_object()
250 if (ctx && ctx->file_priv == fpriv) { in i915_gem_close_object()
252 list_move(&lut->obj_link, &close); in i915_gem_close_object()
256 if (&ln->obj_link != &obj->lut_list) { in i915_gem_close_object()
257 list_add_tail(&bookmark.obj_link, &ln->obj_link); in i915_gem_close_object()
258 if (cond_resched_lock(&obj->lut_lock)) in i915_gem_close_object()
263 spin_unlock(&obj->lut_lock); in i915_gem_close_object()
265 spin_lock(&obj->mmo.lock); in i915_gem_close_object()
266 rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) in i915_gem_close_object()
267 drm_vma_node_revoke(&mmo->vma_node, file); in i915_gem_close_object()
268 spin_unlock(&obj->mmo.lock); in i915_gem_close_object()
271 struct i915_gem_context *ctx = lut->ctx; in i915_gem_close_object()
279 mutex_lock(&ctx->lut_mutex); in i915_gem_close_object()
280 vma = radix_tree_delete(&ctx->handles_vma, lut->handle); in i915_gem_close_object()
282 GEM_BUG_ON(vma->obj != obj); in i915_gem_close_object()
283 GEM_BUG_ON(!atomic_read(&vma->open_count)); in i915_gem_close_object()
286 mutex_unlock(&ctx->lut_mutex); in i915_gem_close_object()
288 i915_gem_context_put(lut->ctx); in i915_gem_close_object()
298 struct drm_i915_private *i915 = to_i915(obj->base.dev); in __i915_gem_free_object_rcu()
301 if (obj->mm.n_placements > 1) in __i915_gem_free_object_rcu()
302 kfree(obj->mm.placements); in __i915_gem_free_object_rcu()
306 GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); in __i915_gem_free_object_rcu()
307 atomic_dec(&i915->mm.free_count); in __i915_gem_free_object_rcu()
314 if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev))) in __i915_gem_object_free_mmaps()
317 if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) { in __i915_gem_object_free_mmaps()
323 &obj->mmo.offsets, in __i915_gem_object_free_mmaps()
325 drm_vma_offset_remove(obj->base.dev->vma_offset_manager, in __i915_gem_object_free_mmaps()
326 &mmo->vma_node); in __i915_gem_object_free_mmaps()
329 obj->mmo.offsets = RB_ROOT; in __i915_gem_object_free_mmaps()
334 * __i915_gem_object_pages_fini - Clean up pages use of a gem object
346 if (!list_empty(&obj->vma.list)) { in __i915_gem_object_pages_fini()
349 spin_lock(&obj->vma.lock); in __i915_gem_object_pages_fini()
350 while ((vma = list_first_entry_or_null(&obj->vma.list, in __i915_gem_object_pages_fini()
353 GEM_BUG_ON(vma->obj != obj); in __i915_gem_object_pages_fini()
354 spin_unlock(&obj->vma.lock); in __i915_gem_object_pages_fini()
358 spin_lock(&obj->vma.lock); in __i915_gem_object_pages_fini()
360 spin_unlock(&obj->vma.lock); in __i915_gem_object_pages_fini()
365 atomic_set(&obj->mm.pages_pin_count, 0); in __i915_gem_object_pages_fini()
369 * locked. The imported GEM shouldn't share reservation lock in __i915_gem_object_pages_fini()
371 * dma-buf, so it's safe to take the lock. in __i915_gem_object_pages_fini()
373 if (obj->base.import_attach) in __i915_gem_object_pages_fini()
378 if (obj->base.import_attach) in __i915_gem_object_pages_fini()
388 GEM_BUG_ON(!list_empty(&obj->lut_list)); in __i915_gem_free_object()
390 bitmap_free(obj->bit_17); in __i915_gem_free_object()
392 if (obj->base.import_attach) in __i915_gem_free_object()
393 drm_prime_gem_destroy(&obj->base, NULL); in __i915_gem_free_object()
395 drm_gem_free_mmap_offset(&obj->base); in __i915_gem_free_object()
397 if (obj->ops->release) in __i915_gem_free_object()
398 obj->ops->release(obj); in __i915_gem_free_object()
400 if (obj->shares_resv_from) in __i915_gem_free_object()
401 i915_vm_resv_put(obj->shares_resv_from); in __i915_gem_free_object()
413 if (obj->ops->delayed_free) { in __i915_gem_free_objects()
414 obj->ops->delayed_free(obj); in __i915_gem_free_objects()
421 /* But keep the pointer alive for RCU-protected lookups */ in __i915_gem_free_objects()
422 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); in __i915_gem_free_objects()
429 struct llist_node *freed = llist_del_all(&i915->mm.free_list); in i915_gem_flush_free_objects()
446 struct drm_i915_private *i915 = to_i915(obj->base.dev); in i915_gem_free_object()
453 * Before we free the object, make sure any pure RCU-only in i915_gem_free_object()
454 * read-side critical sections are complete, e.g. in i915_gem_free_object()
458 atomic_inc(&i915->mm.free_count); in i915_gem_free_object()
471 if (llist_add(&obj->freed, &i915->mm.free_list)) in i915_gem_free_object()
472 queue_work(i915->wq, &i915->mm.free_work); in i915_gem_free_object()
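The free path above relies on a lock-free llist plus a worker: llist_add() returns true only for the producer that turns the list from empty to non-empty, so exactly one caller per batch queues the work, and the worker drains everything with llist_del_all(). A generic, self-contained sketch of the same idiom (not i915 code; all names here are hypothetical):

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct deferred_item {
	struct llist_node node;
};

static LLIST_HEAD(deferred_list);

/* Worker: drain the whole batch that accumulated since the last run. */
static void deferred_free_worker(struct work_struct *work)
{
	struct llist_node *batch = llist_del_all(&deferred_list);
	struct deferred_item *item, *next;

	llist_for_each_entry_safe(item, next, batch, node)
		kfree(item);
}
static DECLARE_WORK(deferred_free_work, deferred_free_worker);

/*
 * Producer: may be called from any context; queues the worker only when
 * this add turned the list from empty to non-empty.
 */
static void deferred_free(struct deferred_item *item)
{
	if (llist_add(&item->node, &deferred_list))
		queue_work(system_wq, &deferred_free_work);
}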
507 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) in i915_gem_object_read_from_page_kmap()
522 src_map = io_mapping_map_wc(&obj->mm.region->iomap, in i915_gem_object_read_from_page_iomap()
523 dma - obj->mm.region->region.start, in i915_gem_object_read_from_page_iomap()
537 if (IS_DGFX(to_i915(obj->base.dev))) in object_has_mappable_iomem()
538 return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource); in object_has_mappable_iomem()
544 * i915_gem_object_read_from_page - read data from the page of a GEM object
554 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
560 GEM_BUG_ON(offset >= obj->base.size); in i915_gem_object_read_from_page()
561 GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size); in i915_gem_object_read_from_page()
569 return -ENODEV; in i915_gem_object_read_from_page()
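A hedged caller sketch for the helper documented above (not from the listing). The prototype is assumed to be i915_gem_object_read_from_page(obj, u64 offset, void *dst, int size); per the GEM_BUG_ON above the read must not cross a page boundary, and the object's pages are assumed to already be pinned by the caller.

/*
 * Hypothetical caller: read a single u32 from a GEM object's backing store.
 * Assumes the pages are pinned and the read stays within one page.
 */
static int example_peek_u32(struct drm_i915_gem_object *obj,
			    u64 offset, u32 *value)
{
	return i915_gem_object_read_from_page(obj, offset, value,
					      sizeof(*value));
}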
575 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
579 * If the object is not locked when checking, the result is only advisory.
580 * If the object is locked when checking, and the function returns true,
590 int pin_count = atomic_read(&obj->mm.pages_pin_count); in i915_gem_object_evictable()
595 spin_lock(&obj->vma.lock); in i915_gem_object_evictable()
596 list_for_each_entry(vma, &obj->vma.list, obj_link) { in i915_gem_object_evictable()
598 spin_unlock(&obj->vma.lock); in i915_gem_object_evictable()
601 if (atomic_read(&vma->pages_count)) in i915_gem_object_evictable()
602 pin_count--; in i915_gem_object_evictable()
604 spin_unlock(&obj->vma.lock); in i915_gem_object_evictable()
611 * i915_gem_object_migratable - Whether the object is migratable out of the
620 struct intel_memory_region *mr = READ_ONCE(obj->mm.region); in i915_gem_object_migratable()
625 return obj->mm.n_placements > 1; in i915_gem_object_migratable()
629 * i915_gem_object_has_struct_page - Whether the object is page-backed
632 * This function should only be called while the object is locked or pinned,
635 * Return: True if page-backed, false otherwise.
640 if (IS_DGFX(to_i915(obj->base.dev)) && in i915_gem_object_has_struct_page()
644 return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE; in i915_gem_object_has_struct_page()
648 * i915_gem_object_has_iomem - Whether the object is iomem-backed
651 * This function should only be called while the object is locked or pinned,
654 * Return: True if iomem-backed, false otherwise.
659 if (IS_DGFX(to_i915(obj->base.dev)) && in i915_gem_object_has_iomem()
663 return obj->mem_flags & I915_BO_FLAG_IOMEM; in i915_gem_object_has_iomem()
667 * i915_gem_object_can_migrate - Whether an object likely can be migrated
686 struct drm_i915_private *i915 = to_i915(obj->base.dev); in i915_gem_object_can_migrate()
687 unsigned int num_allowed = obj->mm.n_placements; in i915_gem_object_can_migrate()
692 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED); in i915_gem_object_can_migrate()
694 mr = i915->mm.regions[id]; in i915_gem_object_can_migrate()
698 if (!IS_ALIGNED(obj->base.size, mr->min_page_size)) in i915_gem_object_can_migrate()
701 if (obj->mm.region == mr) in i915_gem_object_can_migrate()
707 if (!obj->ops->migrate) in i915_gem_object_can_migrate()
710 if (!(obj->flags & I915_BO_ALLOC_USER)) in i915_gem_object_can_migrate()
717 if (mr == obj->mm.placements[i]) in i915_gem_object_can_migrate()
725 * i915_gem_object_migrate - Migrate an object to the desired region id
734 * be locked.
745 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
746 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
747 * -EBUSY if the object is pinned.
753 return __i915_gem_object_migrate(obj, ww, id, obj->flags); in i915_gem_object_migrate()
757 * __i915_gem_object_migrate - Migrate an object to the desired region id, with
763 * @flags: The object flags. Normally just obj->flags.
768 * be locked.
779 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
780 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
781 * -EBUSY if the object is pinned.
788 struct drm_i915_private *i915 = to_i915(obj->base.dev); in __i915_gem_object_migrate()
792 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED); in __i915_gem_object_migrate()
795 mr = i915->mm.regions[id]; in __i915_gem_object_migrate()
799 return -EINVAL; in __i915_gem_object_migrate()
801 if (!obj->ops->migrate) { in __i915_gem_object_migrate()
802 if (GEM_WARN_ON(obj->mm.region != mr)) in __i915_gem_object_migrate()
803 return -EINVAL; in __i915_gem_object_migrate()
807 return obj->ops->migrate(obj, mr, flags); in __i915_gem_object_migrate()
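As a hedged illustration of how a caller might drive the migration helpers above (not from the listing): migration is done under the ww locking transaction, retrying on -EDEADLK. The for_i915_gem_ww() retry macro, i915_gem_object_lock() and INTEL_REGION_SMEM come from elsewhere in the driver and are assumptions relative to this excerpt.

/*
 * Hypothetical caller sketch: move an object to system memory under the
 * ww transaction, letting the macro back off and retry on -EDEADLK.
 */
static int example_migrate_to_smem(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		/* Checks the placement list, region alignment and obj->ops->migrate. */
		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
	}

	return err;
}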
811 * i915_gem_object_placement_possible - Check whether the object can be
823 if (!obj->mm.n_placements) { in i915_gem_object_placement_possible()
836 for (i = 0; i < obj->mm.n_placements; i++) { in i915_gem_object_placement_possible()
837 if (obj->mm.placements[i]->type == type) in i915_gem_object_placement_possible()
845 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
846 * pages when placed in system-memory, in order to save and later restore the
847 * flat-CCS aux state when the object is moved between local-memory and
848 * system-memory
858 if (!HAS_FLAT_CCS(to_i915(obj->base.dev))) in i915_gem_object_needs_ccs_pages()
861 if (obj->flags & I915_BO_ALLOC_CCS_AUX) in i915_gem_object_needs_ccs_pages()
864 for (i = 0; i < obj->mm.n_placements; i++) { in i915_gem_object_needs_ccs_pages()
866 if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM) in i915_gem_object_needs_ccs_pages()
869 obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL) in i915_gem_object_needs_ccs_pages()
878 INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); in i915_gem_init__objects()
890 return -ENOMEM; in i915_objects_module_init()
902 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
906 * A non-signaled moving fence means that there is an async operation
908 * any GPU- or CPU PTEs to the object's pages.
915 return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL, in i915_gem_object_get_moving_fence()
920 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
927 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
938 ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL, in i915_gem_object_wait_moving_fence()
941 ret = -ETIME; in i915_gem_object_wait_moving_fence()
943 ret = -EIO; in i915_gem_object_wait_moving_fence()
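A hedged caller sketch (not from the listing), assuming the prototype i915_gem_object_wait_moving_fence(obj, bool intr) and that the caller already holds the object lock:

/*
 * Hypothetical caller: block until any pending async move of the object's
 * backing store has completed before inserting CPU or GPU PTEs.
 */
static int example_wait_for_move(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_wait_moving_fence(obj, true /* interruptible */);
	if (err)
		return err;	/* -ERESTARTSYS, -ETIME or -EIO per the listing */

	/* The pages are now safe to map. */
	return 0;
}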
949 * i915_gem_object_has_unknown_state - Return true if the object backing pages are
964 return obj->mm.unknown_state; in i915_gem_object_has_unknown_state()