Lines Matching +full:key +full:- +full:release
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
4 * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
39 * Base- and reference object implementation for the various
41 * and release on file close.
70 * file release.
90 * This is the per-device data structure needed for ttm object management.
103 * @hash: Hash entry for the per-file object reference hash.
105 * @head: List entry for the per-file list of ref-objects.
114 * that allows lookup with a pointer to the referenced object as a key. In
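
Taken together, the fragments above describe a two-level scheme: a device-wide, kref-counted base object published in an idr, plus per-file ref objects that pin it and are drained on file release. A minimal sketch of the relationships; every example_* name here is illustrative, not the driver's own:

    #include <linux/hashtable.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Device-wide object: kref-counted, looked up by an idr-allocated handle. */
    struct example_base {
            struct kref refcount;
            u64 handle;
            bool shareable;                 /* may other files take refs on it? */
    };

    /* Per-file reference pinning one example_base. */
    struct example_ref {
            struct hlist_node head;         /* hash entry, keyed by handle */
            struct list_head list;          /* per-file list, drained on close */
            struct rcu_head rcu_head;       /* deferred free after a grace period */
            u64 key;                        /* == base->handle */
            struct kref kref;               /* usage count within this file */
            struct example_base *base;
    };

    /* File-scope stand-ins for the driver's per-file tfile->ref_hash,
     * tfile->lock and tfile->ref_list. */
    static DEFINE_HASHTABLE(ref_hash, 8);
    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(ref_list);
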
135 kref_get(&tfile->refcount); in ttm_object_file_ref()
140 uint64_t key, in ttm_tfile_find_ref_rcu() argument
145 hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) { in ttm_tfile_find_ref_rcu()
146 if (hash->key == key) { in ttm_tfile_find_ref_rcu()
151 return -EINVAL; in ttm_tfile_find_ref_rcu()
155 uint64_t key, in ttm_tfile_find_ref() argument
160 hash_for_each_possible(tfile->ref_hash, hash, head, key) { in ttm_tfile_find_ref()
161 if (hash->key == key) { in ttm_tfile_find_ref()
166 return -EINVAL; in ttm_tfile_find_ref()
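
The two finders above walk the same hash bucket and differ only in locking: the _rcu variant may run without the per-file lock, inside rcu_read_lock(). Continuing the sketch:

    /* Locked variant: caller holds example_lock (tfile->lock in the driver). */
    static struct example_ref *find_ref(u64 key)
    {
            struct example_ref *ref;

            hash_for_each_possible(ref_hash, ref, head, key)
                    if (ref->key == key)
                            return ref;
            return NULL;
    }

    /* Lock-free variant: caller must be inside rcu_read_lock(). */
    static struct example_ref *find_ref_rcu(u64 key)
    {
            struct example_ref *ref;

            hash_for_each_possible_rcu(ref_hash, ref, head, key)
                    if (ref->key == key)
                            return ref;
            return NULL;
    }
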
183 kref_put(&tfile->refcount, ttm_object_file_destroy); in ttm_object_file_unref()
193 struct ttm_object_device *tdev = tfile->tdev; in ttm_base_object_init()
196 base->shareable = shareable; in ttm_base_object_init()
197 base->tfile = ttm_object_file_ref(tfile); in ttm_base_object_init()
198 base->refcount_release = refcount_release; in ttm_base_object_init()
199 base->object_type = object_type; in ttm_base_object_init()
200 kref_init(&base->refcount); in ttm_base_object_init()
202 spin_lock(&tdev->object_lock); in ttm_base_object_init()
203 ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT); in ttm_base_object_init()
204 spin_unlock(&tdev->object_lock); in ttm_base_object_init()
209 base->handle = ret; in ttm_base_object_init()
218 spin_lock(&tdev->object_lock); in ttm_base_object_init()
219 idr_remove(&tdev->idr, base->handle); in ttm_base_object_init()
220 spin_unlock(&tdev->object_lock); in ttm_base_object_init()
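
ttm_base_object_init() allocates the handle with idr_alloc() while holding a spinlock, which is why it must pass GFP_NOWAIT, and its error path removes the handle again. A condensed sketch of the allocation step (the driver keeps a separate tdev->object_lock for the idr):

    #include <linux/idr.h>

    static DEFINE_IDR(example_idr);         /* stands in for tdev->idr */
    static DEFINE_SPINLOCK(object_lock);    /* stands in for tdev->object_lock */

    static int example_base_init(struct example_base *base)
    {
            int ret;

            kref_init(&base->refcount);
            spin_lock(&object_lock);
            /*
             * GFP_NOWAIT because sleeping is forbidden under a spinlock;
             * start = 1 keeps 0 free as an invalid handle, and end = 0
             * tells idr_alloc() there is no upper bound.
             */
            ret = idr_alloc(&example_idr, base, 1, 0, GFP_NOWAIT);
            spin_unlock(&object_lock);
            if (ret < 0)
                    return ret;
            base->handle = ret;
            return 0;
    }
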
228 struct ttm_object_device *tdev = base->tfile->tdev; in ttm_release_base()
230 spin_lock(&tdev->object_lock); in ttm_release_base()
231 idr_remove(&tdev->idr, base->handle); in ttm_release_base()
232 spin_unlock(&tdev->object_lock); in ttm_release_base()
240 ttm_object_file_unref(&base->tfile); in ttm_release_base()
241 if (base->refcount_release) in ttm_release_base()
242 base->refcount_release(&base); in ttm_release_base()
251 kref_put(&base->refcount, ttm_release_base); in ttm_base_object_unref()
255 uint64_t key) in ttm_base_object_lookup() argument
261 spin_lock(&tfile->lock); in ttm_base_object_lookup()
262 ret = ttm_tfile_find_ref(tfile, key, &hash); in ttm_base_object_lookup()
265 base = hlist_entry(hash, struct ttm_ref_object, hash)->obj; in ttm_base_object_lookup()
266 if (!kref_get_unless_zero(&base->refcount)) in ttm_base_object_lookup()
269 spin_unlock(&tfile->lock); in ttm_base_object_lookup()
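
The lookup above is the standard "find, then try-get" idiom: finding the object only proves it was published, so kref_get_unless_zero() refuses objects whose refcount already reached zero and are mid-teardown. In sketch form, names as above:

    /* Return the base behind a per-file ref, or NULL if it is dying. */
    static struct example_base *example_lookup(u64 key)
    {
            struct example_ref *ref;
            struct example_base *base = NULL;

            spin_lock(&example_lock);
            ref = find_ref(key);
            if (ref) {
                    base = ref->base;
                    if (!kref_get_unless_zero(&base->refcount))
                            base = NULL;    /* published, but being torn down */
            }
            spin_unlock(&example_lock);
            return base;
    }
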
276 ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key) in ttm_base_object_lookup_for_ref() argument
281 base = idr_find(&tdev->idr, key); in ttm_base_object_lookup_for_ref()
283 if (base && !kref_get_unless_zero(&base->refcount)) in ttm_base_object_lookup_for_ref()
297 int ret = -EINVAL; in ttm_ref_object_add()
299 if (base->tfile != tfile && !base->shareable) in ttm_ref_object_add()
300 return -EPERM; in ttm_ref_object_add()
305 while (ret == -EINVAL) { in ttm_ref_object_add()
307 ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash); in ttm_ref_object_add()
311 if (kref_get_unless_zero(&ref->kref)) { in ttm_ref_object_add()
319 return -EPERM; in ttm_ref_object_add()
323 return -ENOMEM; in ttm_ref_object_add()
326 ref->hash.key = base->handle; in ttm_ref_object_add()
327 ref->obj = base; in ttm_ref_object_add()
328 ref->tfile = tfile; in ttm_ref_object_add()
329 kref_init(&ref->kref); in ttm_ref_object_add()
331 spin_lock(&tfile->lock); in ttm_ref_object_add()
332 hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key); in ttm_ref_object_add()
335 list_add_tail(&ref->head, &tfile->ref_list); in ttm_ref_object_add()
336 kref_get(&base->refcount); in ttm_ref_object_add()
337 spin_unlock(&tfile->lock); in ttm_ref_object_add()
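
ttm_ref_object_add() is an optimistic find-or-insert loop: it first tries to elevate an existing ref under RCU, falling back to allocating and publishing a new one under the lock, and the retry absorbs races with a concurrent release. A condensed sketch of the shape (the recheck under the lock is a generic safeguard, not a line-for-line copy of the driver's loop):

    #include <linux/slab.h>

    static int example_ref_add(struct example_base *base)
    {
            struct example_ref *ref;
            bool found, got;

            /* The driver only enforces this for refs from other files. */
            if (!base->shareable)
                    return -EPERM;

            for (;;) {
                    found = got = false;
                    rcu_read_lock();
                    ref = find_ref_rcu(base->handle);
                    if (ref) {
                            found = true;
                            got = kref_get_unless_zero(&ref->kref);
                    }
                    rcu_read_unlock();
                    if (got)
                            return 0;       /* reused an existing ref */
                    if (found)
                            continue;       /* ref is mid-release; retry */

                    ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                    if (!ref)
                            return -ENOMEM;
                    ref->key = base->handle;
                    ref->base = base;
                    kref_init(&ref->kref);

                    spin_lock(&example_lock);
                    if (find_ref(ref->key)) {       /* lost an insertion race */
                            spin_unlock(&example_lock);
                            kfree(ref);
                            continue;
                    }
                    hash_add_rcu(ref_hash, &ref->head, ref->key);
                    list_add_tail(&ref->list, &ref_list);
                    kref_get(&base->refcount);      /* each ref pins the base */
                    spin_unlock(&example_lock);
                    return 0;
            }
    }
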
345 static void __releases(tfile->lock) __acquires(tfile->lock)
350 struct ttm_object_file *tfile = ref->tfile; in ttm_ref_object_release()
352 hash_del_rcu(&ref->hash.head); in ttm_ref_object_release()
353 list_del(&ref->head); in ttm_ref_object_release()
354 spin_unlock(&tfile->lock); in ttm_ref_object_release()
356 ttm_base_object_unref(&ref->obj); in ttm_ref_object_release()
358 spin_lock(&tfile->lock); in ttm_ref_object_release()
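
The __releases()/__acquires() annotations above tell sparse that this function temporarily drops the lock it is entered with: the base-object unref must not run under the per-file lock. Continuing the sketch, with the annotations and a stub release for the base:

    static void example_base_release(struct kref *kref)
    {
            /* Stub: the driver unpublishes the handle and frees the object. */
    }

    static void example_ref_release(struct kref *kref)
            __releases(example_lock) __acquires(example_lock)
    {
            struct example_ref *ref =
                    container_of(kref, struct example_ref, kref);

            hash_del_rcu(&ref->head);
            list_del(&ref->list);
            spin_unlock(&example_lock);     /* the unref may take other locks */
            kref_put(&ref->base->refcount, example_base_release);
            kfree_rcu(ref, rcu_head);       /* RCU readers may still see it */
            spin_lock(&example_lock);
    }
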
362 unsigned long key) in ttm_ref_object_base_unref() argument
368 spin_lock(&tfile->lock); in ttm_ref_object_base_unref()
369 ret = ttm_tfile_find_ref(tfile, key, &hash); in ttm_ref_object_base_unref()
371 spin_unlock(&tfile->lock); in ttm_ref_object_base_unref()
372 return -EINVAL; in ttm_ref_object_base_unref()
375 kref_put(&ref->kref, ttm_ref_object_release); in ttm_ref_object_base_unref()
376 spin_unlock(&tfile->lock); in ttm_ref_object_base_unref()
387 spin_lock(&tfile->lock); in ttm_object_file_release()
390 * Since we release the lock within the loop, we have to in ttm_object_file_release()
394 while (!list_empty(&tfile->ref_list)) { in ttm_object_file_release()
395 list = tfile->ref_list.next; in ttm_object_file_release()
397 ttm_ref_object_release(&ref->kref); in ttm_object_file_release()
400 spin_unlock(&tfile->lock); in ttm_object_file_release()
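
Because the per-ref release drops and retakes the lock, the file-release loop cannot use list_for_each(); it re-reads the list head every iteration instead. Note that the driver calls the release function directly rather than through kref_put(): at file-release time no other holder of these refs can remain. Sketched:

    static void example_file_release(void)
    {
            struct example_ref *ref;

            spin_lock(&example_lock);
            while (!list_empty(&ref_list)) {
                    ref = list_first_entry(&ref_list, struct example_ref, list);
                    /* Direct call, bypassing the kref count (see above). */
                    example_ref_release(&ref->kref);
            }
            spin_unlock(&example_lock);
    }
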
412 spin_lock_init(&tfile->lock); in ttm_object_file_init()
413 tfile->tdev = tdev; in ttm_object_file_init()
414 kref_init(&tfile->refcount); in ttm_object_file_init()
415 INIT_LIST_HEAD(&tfile->ref_list); in ttm_object_file_init()
417 hash_init(tfile->ref_hash); in ttm_object_file_init()
430 spin_lock_init(&tdev->object_lock); in ttm_object_device_init()
440 idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1); in ttm_object_device_init()
441 tdev->ops = *ops; in ttm_object_device_init()
442 tdev->dmabuf_release = tdev->ops.release; in ttm_object_device_init()
443 tdev->ops.release = ttm_prime_dmabuf_release; in ttm_object_device_init()
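
ttm_object_device_init() keeps a private copy of the driver's dma_buf_ops and interposes on the release callback, saving the original so ttm_prime_dmabuf_release() can chain to it before clearing the cached prime->dma_buf pointer. A sketch of the interposition, with hypothetical names:

    #include <linux/dma-buf.h>

    struct example_device {
            struct dma_buf_ops ops;                 /* patched private copy */
            void (*orig_release)(struct dma_buf *); /* driver's hook */
    };

    static struct example_device example_dev;

    static void example_dmabuf_release(struct dma_buf *dma_buf)
    {
            if (example_dev.orig_release)
                    example_dev.orig_release(dma_buf);
            /* ...then invalidate any cached dma_buf pointer (see the
             * prime->dma_buf handling below) so it is not handed out again. */
    }

    static void example_device_init(const struct dma_buf_ops *driver_ops)
    {
            example_dev.ops = *driver_ops;  /* copy; never patch the caller's */
            example_dev.orig_release = example_dev.ops.release;
            example_dev.ops.release = example_dmabuf_release;
    }
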
453 WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); in ttm_object_device_release()
454 idr_destroy(&tdev->idr); in ttm_object_device_release()
460 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
462 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
465 * the file, but synchronizes with its release method to make sure it has
474 return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L; in get_dma_buf_unless_doomed()
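
get_dma_buf_unless_doomed() is the struct-file analogue of kref_get_unless_zero(): once f_count has dropped to zero the dma-buf is already inside fput()/release and must not be revived. Callers test a cached buffer under prime->mutex, roughly as below; example_export() is a hypothetical helper sketched further down, and error handling is trimmed:

            mutex_lock(&prime->mutex);
            dma_buf = prime->dma_buf;
            if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                    /* Cached buffer absent or doomed: export a fresh one
                     * and re-cache it, still under prime->mutex. */
                    dma_buf = example_export(/* hypothetical, see below */);
                    prime->dma_buf = dma_buf;
            }
            mutex_unlock(&prime->mutex);
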
478 * ttm_prime_refcount_release - refcount release method for a prime object.
494 BUG_ON(prime->dma_buf != NULL); in ttm_prime_refcount_release()
495 mutex_destroy(&prime->mutex); in ttm_prime_refcount_release()
496 if (prime->refcount_release) in ttm_prime_refcount_release()
497 prime->refcount_release(&base); in ttm_prime_refcount_release()
501 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
505 * This function first calls the dma_buf release method the driver
513 (struct ttm_prime_object *) dma_buf->priv; in ttm_prime_dmabuf_release()
514 struct ttm_base_object *base = &prime->base; in ttm_prime_dmabuf_release()
515 struct ttm_object_device *tdev = base->tfile->tdev; in ttm_prime_dmabuf_release()
517 if (tdev->dmabuf_release) in ttm_prime_dmabuf_release()
518 tdev->dmabuf_release(dma_buf); in ttm_prime_dmabuf_release()
519 mutex_lock(&prime->mutex); in ttm_prime_dmabuf_release()
520 if (prime->dma_buf == dma_buf) in ttm_prime_dmabuf_release()
521 prime->dma_buf = NULL; in ttm_prime_dmabuf_release()
522 mutex_unlock(&prime->mutex); in ttm_prime_dmabuf_release()
527 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
534 * a dma-buf. Note that we don't handle imports yet, because we simply
540 struct ttm_object_device *tdev = tfile->tdev; in ttm_prime_fd_to_handle()
550 if (dma_buf->ops != &tdev->ops) in ttm_prime_fd_to_handle()
551 return -ENOSYS; in ttm_prime_fd_to_handle()
553 prime = (struct ttm_prime_object *) dma_buf->priv; in ttm_prime_fd_to_handle()
554 base = &prime->base; in ttm_prime_fd_to_handle()
555 *handle = base->handle; in ttm_prime_fd_to_handle()
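
The import path above gets the dma-buf from the fd, verifies it was exported by this same device by comparing ops pointers, and then simply reads the handle out of the embedded base object (the full function also takes a per-file ref on that handle before returning). Condensed sketch; example_handle_from_priv() is a hypothetical accessor:

    #include <linux/err.h>

    static int example_fd_to_handle(struct example_device *dev, int fd,
                                    u32 *handle)
    {
            struct dma_buf *dma_buf = dma_buf_get(fd);

            if (IS_ERR(dma_buf))
                    return PTR_ERR(dma_buf);

            /* Only buffers this device exported carry its patched ops, so
             * a pointer compare filters out foreign dma-bufs. */
            if (dma_buf->ops != &dev->ops) {
                    dma_buf_put(dma_buf);
                    return -ENOSYS;         /* true imports unsupported */
            }

            *handle = example_handle_from_priv(dma_buf->priv);
            dma_buf_put(dma_buf);
            return 0;
    }
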
564 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
568 * @flags: flags for dma-buf creation. We just pass them on.
576 struct ttm_object_device *tdev = tfile->tdev; in ttm_prime_handle_to_fd()
584 base->object_type != ttm_prime_type)) { in ttm_prime_handle_to_fd()
585 ret = -ENOENT; in ttm_prime_handle_to_fd()
590 if (unlikely(!base->shareable)) { in ttm_prime_handle_to_fd()
591 ret = -EPERM; in ttm_prime_handle_to_fd()
595 ret = mutex_lock_interruptible(&prime->mutex); in ttm_prime_handle_to_fd()
597 ret = -ERESTARTSYS; in ttm_prime_handle_to_fd()
601 dma_buf = prime->dma_buf; in ttm_prime_handle_to_fd()
604 exp_info.ops = &tdev->ops; in ttm_prime_handle_to_fd()
605 exp_info.size = prime->size; in ttm_prime_handle_to_fd()
616 mutex_unlock(&prime->mutex); in ttm_prime_handle_to_fd()
624 prime->dma_buf = dma_buf; in ttm_prime_handle_to_fd()
626 mutex_unlock(&prime->mutex); in ttm_prime_handle_to_fd()
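
When no cached dma-buf can be revived, the export path builds a fresh one with DEFINE_DMA_BUF_EXPORT_INFO()/dma_buf_export(), pointing priv back at the prime object so the patched release hook can find it. A sketch, error handling and the preceding handle lookup trimmed:

    static struct dma_buf *example_export(struct example_device *dev,
                                          void *prime_priv, size_t size,
                                          int flags)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops = &dev->ops;       /* patched ops: our release hook */
            exp_info.size = size;
            exp_info.flags = flags;
            exp_info.priv = prime_priv;     /* lets release find the object */

            return dma_buf_export(&exp_info);
    }

On success the caller caches the result in prime->dma_buf and hands userspace a descriptor via dma_buf_fd(dma_buf, flags).
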
642 * ttm_prime_object_init - Initialize a ttm_prime_object
659 mutex_init(&prime->mutex); in ttm_prime_object_init()
660 prime->size = PAGE_ALIGN(size); in ttm_prime_object_init()
661 prime->real_type = type; in ttm_prime_object_init()
662 prime->dma_buf = NULL; in ttm_prime_object_init()
663 prime->refcount_release = refcount_release; in ttm_prime_object_init()
664 return ttm_base_object_init(tfile, &prime->base, shareable, in ttm_prime_object_init()
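
ttm_prime_object_init() fills in the prime-specific state (mutex, PAGE_ALIGN'ed size, NULL cached dma_buf, chained release) and then delegates to ttm_base_object_init(), registering ttm_prime_refcount_release as the release hook so the dma-buf bookkeeping is torn down before the driver's own release runs. The embed-and-delegate shape, again with hypothetical names:

    #include <linux/mm.h>           /* PAGE_ALIGN */
    #include <linux/mutex.h>

    struct example_prime {
            struct example_base base;       /* embedded; placed first in this
                                             * sketch so dma_buf->priv can be
                                             * cast either way */
            struct mutex mutex;             /* guards the cached dma_buf */
            size_t size;
            struct dma_buf *dma_buf;        /* cached export, may be NULL */
    };

    static int example_prime_init(struct example_prime *prime, size_t size)
    {
            mutex_init(&prime->mutex);
            prime->size = PAGE_ALIGN(size);
            prime->dma_buf = NULL;
            return example_base_init(&prime->base); /* delegate */
    }
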