Lines matching "ts", "-", "attached" in drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

1 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
55 * struct vmw_event_fence_action - fence action that delivers a drm event.
62 * @tv_sec: If non-null, the variable pointed to will be assigned
81 return container_of(fence->base.lock, struct vmw_fence_manager, lock); in fman_from_fence()
86 if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0) in vmw_fence_goal_read()
94 if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0) in vmw_fence_goal_write()
105 * b) On-demand when we have waiters. Sleeping waiters will switch on the
113 * actions attached to a fence. The fencing subsystem then makes use of the
115 * which has an action attached, and each time vmw_fences_update is called,
119 * objects with actions attached to them.
128 if (!list_empty(&fence->head)) { in vmw_fence_obj_destroy()
129 spin_lock(&fman->lock); in vmw_fence_obj_destroy()
130 list_del_init(&fence->head); in vmw_fence_obj_destroy()
131 spin_unlock(&fman->lock); in vmw_fence_obj_destroy()
133 fence->destroy(fence); in vmw_fence_obj_destroy()
152 struct vmw_private *dev_priv = fman->dev_priv; in vmw_fence_enable_signaling()
155 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) in vmw_fence_enable_signaling()
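
The test on line 155 is the standard wraparound-safe comparison for a monotonically increasing 32-bit seqno: a fence has passed iff the unsigned difference between the device seqno and the fence seqno is less than half the counter range. A runnable userspace demonstration; VMW_FENCE_WRAP is assumed here to be half the u32 range, the driver's actual value comes from its own headers:

        /* Wraparound-safe "has this seqno passed?" test, as used above. */
        #include <assert.h>
        #include <stdint.h>

        #define VMW_FENCE_WRAP (1u << 31)   /* assumed: half the u32 range */

        /* True if the device counter has reached fence_seqno, even after
         * the 32-bit counter wraps past 0xffffffff back to 0. */
        static int seqno_passed(uint32_t current, uint32_t fence_seqno)
        {
                return current - fence_seqno < VMW_FENCE_WRAP;
        }

        int main(void)
        {
                assert(seqno_passed(100, 50));        /* plainly passed */
                assert(!seqno_passed(50, 100));       /* not yet */
                assert(seqno_passed(5, 0xfffffffb));  /* passed across the wrap */
                return 0;
        }
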
172 wake_up_process(wait->task); in vmwgfx_wait_cb()
183 struct vmw_private *dev_priv = fman->dev_priv; in vmw_fence_wait()
192 spin_lock(f->lock); in vmw_fence_wait()
194 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) in vmw_fence_wait()
198 ret = -ERESTARTSYS; in vmw_fence_wait()
204 list_add(&cb.base.node, &f->cb_list); in vmw_fence_wait()
219 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) { in vmw_fence_wait()
226 ret = -ERESTARTSYS; in vmw_fence_wait()
233 spin_unlock(f->lock); in vmw_fence_wait()
237 spin_lock(f->lock); in vmw_fence_wait()
244 spin_unlock(f->lock); in vmw_fence_wait()
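
The vmw_fence_wait() fragments above implement the classic dma-fence wait loop: install a dma_fence_cb that wakes the sleeping task, then sleep under set_current_state()/schedule_timeout() until the fence signals, the timeout expires, or a signal arrives. A minimal sketch of that pattern, interruptible-only for brevity, with _sketch names standing in for the driver's types:

        #include <linux/dma-fence.h>
        #include <linux/errno.h>
        #include <linux/sched.h>
        #include <linux/sched/signal.h>

        struct wait_cb_sketch {
                struct dma_fence_cb base;
                struct task_struct *task;
        };

        static void wait_cb_sketch_func(struct dma_fence *f, struct dma_fence_cb *cb)
        {
                struct wait_cb_sketch *wait =
                        container_of(cb, struct wait_cb_sketch, base);

                /* Runs from dma_fence_signal() with the fence lock held. */
                wake_up_process(wait->task);
        }

        static long wait_sketch(struct dma_fence *f, long timeout)
        {
                struct wait_cb_sketch cb = { .task = current };

                spin_lock(f->lock);

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                        goto out;   /* already signaled: nothing to wait for */

                cb.base.func = wait_cb_sketch_func;
                list_add(&cb.base.node, &f->cb_list);

                while (timeout > 0) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                                break;
                        if (signal_pending(current)) {
                                timeout = -ERESTARTSYS;
                                break;
                        }
                        spin_unlock(f->lock);
                        timeout = schedule_timeout(timeout);
                        spin_lock(f->lock);
                }
                __set_current_state(TASK_RUNNING);

                /* dma_fence_signal() empties the node before invoking the
                 * callback, so a non-empty node means timeout or signal. */
                if (!list_empty(&cb.base.node))
                        list_del(&cb.base.node);
        out:
                spin_unlock(f->lock);
                return timeout;
        }
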
275 mutex_lock(&fman->goal_irq_mutex); in vmw_fence_work_func()
277 spin_lock(&fman->lock); in vmw_fence_work_func()
278 list_splice_init(&fman->cleanup_list, &list); in vmw_fence_work_func()
279 seqno_valid = fman->seqno_valid; in vmw_fence_work_func()
280 spin_unlock(&fman->lock); in vmw_fence_work_func()
282 if (!seqno_valid && fman->goal_irq_on) { in vmw_fence_work_func()
283 fman->goal_irq_on = false; in vmw_fence_work_func()
284 vmw_goal_waiter_remove(fman->dev_priv); in vmw_fence_work_func()
286 mutex_unlock(&fman->goal_irq_mutex); in vmw_fence_work_func()
298 list_del_init(&action->head); in vmw_fence_work_func()
299 if (action->cleanup) in vmw_fence_work_func()
300 action->cleanup(action); in vmw_fence_work_func()
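
vmw_fence_work_func() shows the usual deferred-cleanup idiom: steal the whole pending list under the spinlock with list_splice_init(), then run the possibly-sleeping cleanup hooks with the lock dropped. A reduced sketch; struct fman_sketch and struct action_sketch are illustrative cut-downs of struct vmw_fence_manager and struct vmw_fence_action, and the later sketches below reuse them:

        #include <linux/list.h>
        #include <linux/mutex.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>
        #include <linux/workqueue.h>

        struct action_sketch {
                struct list_head head;
                void (*seq_passed)(struct action_sketch *action);
                void (*cleanup)(struct action_sketch *action);
        };

        struct fman_sketch {
                spinlock_t lock;
                struct list_head fence_list;     /* unsignaled fences, seqno order */
                struct list_head cleanup_list;   /* actions awaiting ->cleanup() */
                struct work_struct work;
                struct mutex goal_irq_mutex;
                bool goal_irq_on;
                bool seqno_valid;                /* a goal seqno is programmed */
                bool fifo_down;
                u64 ctx;                         /* dma-fence context id */
        };

        static void cleanup_work_sketch(struct work_struct *work)
        {
                struct fman_sketch *fman =
                        container_of(work, struct fman_sketch, work);
                struct action_sketch *action, *next;
                struct list_head list;

                INIT_LIST_HEAD(&list);

                spin_lock(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);  /* steal the list */
                spin_unlock(&fman->lock);

                /* Lock dropped: cleanup hooks may sleep or take locks. */
                list_for_each_entry_safe(action, next, &list, head) {
                        list_del_init(&action->head);
                        if (action->cleanup)
                                action->cleanup(action);
                }
        }
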
312 fman->dev_priv = dev_priv; in vmw_fence_manager_init()
313 spin_lock_init(&fman->lock); in vmw_fence_manager_init()
314 INIT_LIST_HEAD(&fman->fence_list); in vmw_fence_manager_init()
315 INIT_LIST_HEAD(&fman->cleanup_list); in vmw_fence_manager_init()
316 INIT_WORK(&fman->work, &vmw_fence_work_func); in vmw_fence_manager_init()
317 fman->fifo_down = true; in vmw_fence_manager_init()
318 mutex_init(&fman->goal_irq_mutex); in vmw_fence_manager_init()
319 fman->ctx = dma_fence_context_alloc(1); in vmw_fence_manager_init()
328 (void) cancel_work_sync(&fman->work); in vmw_fence_manager_takedown()
330 spin_lock(&fman->lock); in vmw_fence_manager_takedown()
331 lists_empty = list_empty(&fman->fence_list) && in vmw_fence_manager_takedown()
332 list_empty(&fman->cleanup_list); in vmw_fence_manager_takedown()
333 spin_unlock(&fman->lock); in vmw_fence_manager_takedown()
345 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, in vmw_fence_obj_init()
346 fman->ctx, seqno); in vmw_fence_obj_init()
347 INIT_LIST_HEAD(&fence->seq_passed_actions); in vmw_fence_obj_init()
348 fence->destroy = destroy; in vmw_fence_obj_init()
350 spin_lock(&fman->lock); in vmw_fence_obj_init()
351 if (unlikely(fman->fifo_down)) { in vmw_fence_obj_init()
352 ret = -EBUSY; in vmw_fence_obj_init()
355 list_add_tail(&fence->head, &fman->fence_list); in vmw_fence_obj_init()
358 spin_unlock(&fman->lock); in vmw_fence_obj_init()
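
vmw_fence_obj_init() pairs every new fence with the manager's single spinlock and the one dma-fence context allocated in vmw_fence_manager_init(); sharing the lock is what lets fman_from_fence() recover the manager with container_of() on fence->base.lock. A sketch of that pairing, continuing the reduced types above:

        #include <linux/dma-fence.h>

        static const char *name_sketch(struct dma_fence *f)
        {
                return "sketch";
        }

        /* dma_fence_ops must at least name the driver and timeline. */
        static const struct dma_fence_ops ops_sketch = {
                .get_driver_name = name_sketch,
                .get_timeline_name = name_sketch,
        };

        static void fman_init_sketch(struct fman_sketch *fman)
        {
                spin_lock_init(&fman->lock);
                INIT_LIST_HEAD(&fman->fence_list);
                fman->ctx = dma_fence_context_alloc(1);  /* one timeline for all */
        }

        static int fence_init_sketch(struct fman_sketch *fman,
                                     struct fence_sketch *fence, u64 seqno)
        {
                dma_fence_init(&fence->base, &ops_sketch, &fman->lock,
                               fman->ctx, seqno);
                INIT_LIST_HEAD(&fence->seq_passed_actions);

                spin_lock(&fman->lock);
                if (fman->fifo_down) {
                        spin_unlock(&fman->lock);
                        return -EBUSY;   /* FIFO is down: no new fences */
                }
                list_add_tail(&fence->head, &fman->fence_list);
                spin_unlock(&fman->lock);
                return 0;
        }

        static struct fman_sketch *fman_from_fence_sketch(struct fence_sketch *fence)
        {
                /* Works because base.lock always points at fman->lock. */
                return container_of(fence->base.lock, struct fman_sketch, lock);
        }

where struct fence_sketch is the reduced fence object used by the remaining sketches:

        struct fence_sketch {
                struct dma_fence base;
                struct list_head head;                /* entry in fman->fence_list */
                struct list_head seq_passed_actions;  /* actions to run on signal */
        };
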
369 list_del_init(&action->head); in vmw_fences_perform_actions()
370 fman->pending_actions[action->type]--; in vmw_fences_perform_actions()
371 if (action->seq_passed != NULL) in vmw_fences_perform_actions()
372 action->seq_passed(action); in vmw_fences_perform_actions()
379 list_add_tail(&action->head, &fman->cleanup_list); in vmw_fences_perform_actions()
384 * vmw_fence_goal_new_locked - Figure out a new device fence goal
395 * action attached, and sets the seqno of that fence as a new fence goal.
405 if (likely(!fman->seqno_valid)) in vmw_fence_goal_new_locked()
408 goal_seqno = vmw_fence_goal_read(fman->dev_priv); in vmw_fence_goal_new_locked()
409 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) in vmw_fence_goal_new_locked()
412 fman->seqno_valid = false; in vmw_fence_goal_new_locked()
413 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { in vmw_fence_goal_new_locked()
414 if (!list_empty(&fence->seq_passed_actions)) { in vmw_fence_goal_new_locked()
415 fman->seqno_valid = true; in vmw_fence_goal_new_locked()
416 vmw_fence_goal_write(fman->dev_priv, in vmw_fence_goal_new_locked()
417 fence->base.seqno); in vmw_fence_goal_new_locked()
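
vmw_fence_goal_new_locked() reprograms the device's fence-goal register only after the previously programmed goal has passed, then aims it at the first still-pending fence that has actions attached, so the device raises goal interrupts only when someone is actually waiting. A sketch under the reduced types above; goal_read_sketch()/goal_write_sketch() are hypothetical stand-ins for vmw_fence_goal_read()/vmw_fence_goal_write(), which pick a register or FIFO location based on SVGA_CAP2_EXTRA_REGS:

        #define VMW_FENCE_WRAP (1u << 31)   /* assumed, see the demo above */

        /* Hypothetical register accessors standing in for the driver's. */
        static u32 goal_read_sketch(struct fman_sketch *fman) { return 0; }
        static void goal_write_sketch(struct fman_sketch *fman, u32 seqno) { }

        /* Called with fman->lock held, after passed_seqno has signaled. */
        static bool goal_new_sketch(struct fman_sketch *fman, u32 passed_seqno)
        {
                struct fence_sketch *fence, *next;

                if (!fman->seqno_valid)
                        return false;   /* no goal programmed, nothing to move */

                if (passed_seqno - goal_read_sketch(fman) >= VMW_FENCE_WRAP)
                        return false;   /* programmed goal not reached yet */

                /* The old goal fired: aim at the next fence with waiters. */
                fman->seqno_valid = false;
                list_for_each_entry_safe(fence, next, &fman->fence_list, head) {
                        if (!list_empty(&fence->seq_passed_actions)) {
                                fman->seqno_valid = true;
                                goal_write_sketch(fman, fence->base.seqno);
                                break;
                        }
                }
                return true;
        }
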
427 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
434 * It is typically called when an action has been attached to a fence to
446 if (dma_fence_is_signaled_locked(&fence->base)) in vmw_fence_goal_check_locked()
449 goal_seqno = vmw_fence_goal_read(fman->dev_priv); in vmw_fence_goal_check_locked()
450 if (likely(fman->seqno_valid && in vmw_fence_goal_check_locked()
451 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP)) in vmw_fence_goal_check_locked()
454 vmw_fence_goal_write(fman->dev_priv, fence->base.seqno); in vmw_fence_goal_check_locked()
455 fman->seqno_valid = true; in vmw_fence_goal_check_locked()
467 seqno = vmw_fence_read(fman->dev_priv); in __vmw_fences_update()
469 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { in __vmw_fences_update()
470 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { in __vmw_fences_update()
471 list_del_init(&fence->head); in __vmw_fences_update()
472 dma_fence_signal_locked(&fence->base); in __vmw_fences_update()
474 list_splice_init(&fence->seq_passed_actions, in __vmw_fences_update()
489 new_seqno = vmw_fence_read(fman->dev_priv); in __vmw_fences_update()
496 if (!list_empty(&fman->cleanup_list)) in __vmw_fences_update()
497 (void) schedule_work(&fman->work); in __vmw_fences_update()
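
__vmw_fences_update() closes a race at the end of its scan: after the goal is reprogrammed, the seqno register is read again, and if it moved while the goal was being written (meaning the goal interrupt may have been missed) the list is rescanned. A sketch of that rerun, continuing the types and goal_new_sketch() above:

        /* Hypothetical device seqno read; the driver uses vmw_fence_read(). */
        static u32 seqno_read_sketch(struct fman_sketch *fman) { return 0; }

        /* Called with fman->lock held. */
        static void fences_update_sketch(struct fman_sketch *fman)
        {
                struct fence_sketch *fence, *next;
                u32 seqno, new_seqno;

                seqno = seqno_read_sketch(fman);
        rerun:
                list_for_each_entry_safe(fence, next, &fman->fence_list, head) {
                        if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
                                list_del_init(&fence->head);
                                dma_fence_signal_locked(&fence->base);
                                /* splice fence->seq_passed_actions and run them */
                        } else {
                                break;  /* list is seqno-ordered; rest pending */
                        }
                }

                /*
                 * If the goal moved, the device may have passed the new goal
                 * while it was being written and the goal irq was missed,
                 * so re-read the seqno and rescan.
                 */
                if (goal_new_sketch(fman, seqno)) {
                        new_seqno = seqno_read_sketch(fman);
                        if (new_seqno != seqno) {
                                seqno = new_seqno;
                                goto rerun;
                        }
                }
        }
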
502 spin_lock(&fman->lock); in vmw_fences_update()
504 spin_unlock(&fman->lock); in vmw_fences_update()
511 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) in vmw_fence_obj_signaled()
516 return dma_fence_is_signaled(&fence->base); in vmw_fence_obj_signaled()
522 long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); in vmw_fence_obj_wait()
527 return -EBUSY; in vmw_fence_obj_wait()
534 dma_fence_free(&fence->base); in vmw_fence_destroy()
546 return -ENOMEM; in vmw_fence_create()
575 struct vmw_fence_obj *fence = &ufence->fence; in vmw_user_fence_base_release()
587 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_user_fence_create()
594 ret = -ENOMEM; in vmw_user_fence_create()
598 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, in vmw_user_fence_create()
609 tmp = vmw_fence_obj_reference(&ufence->fence); in vmw_user_fence_create()
611 ret = ttm_base_object_init(tfile, &ufence->base, false, in vmw_user_fence_create()
624 *p_fence = &ufence->fence; in vmw_user_fence_create()
625 *p_handle = ufence->base.handle; in vmw_user_fence_create()
629 tmp = &ufence->fence; in vmw_user_fence_create()
636 * vmw_fence_fifo_down - signal all unsignaled fence objects.
646 * restart when we've released the fman->lock. in vmw_fence_fifo_down()
649 spin_lock(&fman->lock); in vmw_fence_fifo_down()
650 fman->fifo_down = true; in vmw_fence_fifo_down()
651 while (!list_empty(&fman->fence_list)) { in vmw_fence_fifo_down()
653 list_entry(fman->fence_list.prev, struct vmw_fence_obj, in vmw_fence_fifo_down()
655 dma_fence_get(&fence->base); in vmw_fence_fifo_down()
656 spin_unlock(&fman->lock); in vmw_fence_fifo_down()
662 list_del_init(&fence->head); in vmw_fence_fifo_down()
663 dma_fence_signal(&fence->base); in vmw_fence_fifo_down()
665 list_splice_init(&fence->seq_passed_actions, in vmw_fence_fifo_down()
670 BUG_ON(!list_empty(&fence->head)); in vmw_fence_fifo_down()
671 dma_fence_put(&fence->base); in vmw_fence_fifo_down()
672 spin_lock(&fman->lock); in vmw_fence_fifo_down()
674 spin_unlock(&fman->lock); in vmw_fence_fifo_down()
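
vmw_fence_fifo_down() drains the fence list with the take-reference/drop-lock/sleep/retake-lock pattern: the per-fence wait may sleep, so the lock is released for each iteration and the loop restarts from the still-locked list head. A sketch, continuing the types above:

        static void fifo_down_sketch(struct fman_sketch *fman)
        {
                spin_lock(&fman->lock);
                fman->fifo_down = true;   /* fence_init_sketch() now fails */
                while (!list_empty(&fman->fence_list)) {
                        struct fence_sketch *fence =
                                list_entry(fman->fence_list.prev,
                                           struct fence_sketch, head);
                        long lret;

                        dma_fence_get(&fence->base);
                        spin_unlock(&fman->lock);

                        /* Lock dropped: waiting may sleep. The timeout is
                         * arbitrary for the sketch; the driver has its own
                         * constant. */
                        lret = dma_fence_wait_timeout(&fence->base, false,
                                                      10 * HZ);
                        if (lret <= 0) {
                                /* Device will never signal it: force it. */
                                list_del_init(&fence->head);
                                dma_fence_signal(&fence->base);
                        }

                        dma_fence_put(&fence->base);
                        spin_lock(&fman->lock);
                }
                spin_unlock(&fman->lock);
        }
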
679 spin_lock(&fman->lock); in vmw_fence_fifo_up()
680 fman->fifo_down = false; in vmw_fence_fifo_up()
681 spin_unlock(&fman->lock); in vmw_fence_fifo_up()
686 * vmw_fence_obj_lookup - Look up a user-space fence object
693 * The fence object is looked up and type-checked. The caller needs
706 return ERR_PTR(-EINVAL); in vmw_fence_obj_lookup()
709 if (base->refcount_release != vmw_user_fence_base_release) { in vmw_fence_obj_lookup()
713 return ERR_PTR(-EINVAL); in vmw_fence_obj_lookup()
728 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_fence_obj_wait_ioctl()
730 uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ); in vmw_fence_obj_wait_ioctl()
733 * 64-bit division not present on 32-bit systems, so do an in vmw_fence_obj_wait_ioctl()
737 wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) - in vmw_fence_obj_wait_ioctl()
740 if (!arg->cookie_valid) { in vmw_fence_obj_wait_ioctl()
741 arg->cookie_valid = 1; in vmw_fence_obj_wait_ioctl()
742 arg->kernel_cookie = jiffies + wait_timeout; in vmw_fence_obj_wait_ioctl()
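
The shift expression above (which continues with "- (wait_timeout >> 26)" in the source) approximates division by 1000000 without a 64-bit divide: 1/2^20 + 1/2^24 - 1/2^26 = 67/2^26 ~= 9.9838e-7, within about 0.17 percent of 1e-6. A runnable userspace check:

        /* Shift-only approximation of x / 1000000, as used above. */
        #include <inttypes.h>
        #include <stdio.h>

        static uint64_t div_1e6_approx(uint64_t x)
        {
                return (x >> 20) + (x >> 24) - (x >> 26);
        }

        int main(void)
        {
                uint64_t x = 3000000000ull;   /* e.g. a 3e9-unit timeout */

                printf("exact  : %" PRIu64 "\n", x / 1000000);       /* 3000 */
                printf("approx : %" PRIu64 "\n", div_1e6_approx(x)); /* 2995 */
                return 0;
        }

The approximation always errs slightly low, which for a wait timeout merely shortens the wait by a fraction of a percent.
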
745 base = vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_obj_wait_ioctl()
749 fence = &(container_of(base, struct vmw_user_fence, base)->fence); in vmw_fence_obj_wait_ioctl()
752 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { in vmw_fence_obj_wait_ioctl()
754 0 : -EBUSY); in vmw_fence_obj_wait_ioctl()
758 timeout = (unsigned long)arg->kernel_cookie - timeout; in vmw_fence_obj_wait_ioctl()
760 ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout); in vmw_fence_obj_wait_ioctl()
769 if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) in vmw_fence_obj_wait_ioctl()
770 return ttm_ref_object_base_unref(tfile, arg->handle); in vmw_fence_obj_wait_ioctl()
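
The cookie logic earlier in this ioctl makes the timeout restart-safe: when the wait returns -ERESTARTSYS the syscall is re-entered with the same argument buffer, and the absolute deadline stored in the arg keeps the restart from rearming the full timeout. A reduced sketch (the driver additionally returns 0 instead of -EBUSY when the fence has already signaled):

        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/types.h>

        /* Reduced version of the ioctl argument's cookie fields. */
        struct wait_arg_sketch {
                u32 cookie_valid;
                u64 kernel_cookie;   /* absolute deadline, in jiffies */
        };

        static long remaining_timeout_sketch(struct wait_arg_sketch *arg,
                                             unsigned long wait_jiffies)
        {
                if (!arg->cookie_valid) {
                        /* First call: pin an absolute deadline in the arg,
                         * which survives a syscall restart. */
                        arg->cookie_valid = 1;
                        arg->kernel_cookie = jiffies + wait_jiffies;
                }

                if (time_after_eq(jiffies, (unsigned long)arg->kernel_cookie))
                        return -EBUSY;   /* deadline passed across restarts */

                /* A restarted call waits only for what is left. */
                return (unsigned long)arg->kernel_cookie - jiffies;
        }
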
782 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_fence_obj_signaled_ioctl()
785 base = vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_obj_signaled_ioctl()
789 fence = &(container_of(base, struct vmw_user_fence, base)->fence); in vmw_fence_obj_signaled_ioctl()
792 arg->signaled = vmw_fence_obj_signaled(fence); in vmw_fence_obj_signaled_ioctl()
794 arg->signaled_flags = arg->flags; in vmw_fence_obj_signaled_ioctl()
795 spin_lock(&fman->lock); in vmw_fence_obj_signaled_ioctl()
796 arg->passed_seqno = dev_priv->last_read_seqno; in vmw_fence_obj_signaled_ioctl()
797 spin_unlock(&fman->lock); in vmw_fence_obj_signaled_ioctl()
811 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, in vmw_fence_obj_unref_ioctl()
812 arg->handle); in vmw_fence_obj_unref_ioctl()
822 * attached has passed. It queues the event on the submitter's event list.
829 struct drm_device *dev = eaction->dev; in vmw_event_fence_action_seq_passed()
830 struct drm_pending_event *event = eaction->event; in vmw_event_fence_action_seq_passed()
835 spin_lock_irq(&dev->event_lock); in vmw_event_fence_action_seq_passed()
837 if (likely(eaction->tv_sec != NULL)) { in vmw_event_fence_action_seq_passed()
838 struct timespec64 ts; in vmw_event_fence_action_seq_passed() local
840 ktime_get_ts64(&ts); in vmw_event_fence_action_seq_passed()
842 *eaction->tv_sec = ts.tv_sec; in vmw_event_fence_action_seq_passed()
843 *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC; in vmw_event_fence_action_seq_passed()
846 drm_send_event_locked(dev, eaction->event); in vmw_event_fence_action_seq_passed()
847 eaction->event = NULL; in vmw_event_fence_action_seq_passed()
848 spin_unlock_irq(&dev->event_lock); in vmw_event_fence_action_seq_passed()
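
vmw_event_fence_action_seq_passed() delivers the event under dev->event_lock, optionally stamping it with the monotonic signal time first; after drm_send_event_locked() the DRM core owns the pending event and frees it once userspace reads it. A reduced sketch:

        #include <drm/drm_device.h>
        #include <drm/drm_file.h>
        #include <linux/timekeeping.h>

        /* Reduced version of struct vmw_event_fence_action. */
        struct eaction_sketch {
                struct drm_device *dev;
                struct drm_pending_event *event;
                u32 *tv_sec;    /* if non-NULL, receives the signal time */
                u32 *tv_usec;
        };

        static void deliver_event_sketch(struct eaction_sketch *eaction)
        {
                struct drm_device *dev = eaction->dev;

                spin_lock_irq(&dev->event_lock);
                if (eaction->tv_sec) {
                        struct timespec64 ts;

                        ktime_get_ts64(&ts);
                        /* Monotonic clock, split into the sec/usec pair
                         * the event format expects. */
                        *eaction->tv_sec = ts.tv_sec;
                        *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
                }
                drm_send_event_locked(dev, eaction->event);
                eaction->event = NULL;   /* DRM core now owns and frees it */
                spin_unlock_irq(&dev->event_lock);
        }
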
865 vmw_fence_obj_unreference(&eaction->fence); in vmw_event_fence_action_cleanup()
871 * vmw_fence_obj_add_action - Add an action to a fence object.
885 mutex_lock(&fman->goal_irq_mutex); in vmw_fence_obj_add_action()
886 spin_lock(&fman->lock); in vmw_fence_obj_add_action()
888 fman->pending_actions[action->type]++; in vmw_fence_obj_add_action()
889 if (dma_fence_is_signaled_locked(&fence->base)) { in vmw_fence_obj_add_action()
893 list_add_tail(&action->head, &action_list); in vmw_fence_obj_add_action()
896 list_add_tail(&action->head, &fence->seq_passed_actions); in vmw_fence_obj_add_action()
905 spin_unlock(&fman->lock); in vmw_fence_obj_add_action()
908 if (!fman->goal_irq_on) { in vmw_fence_obj_add_action()
909 fman->goal_irq_on = true; in vmw_fence_obj_add_action()
910 vmw_goal_waiter_add(fman->dev_priv); in vmw_fence_obj_add_action()
914 mutex_unlock(&fman->goal_irq_mutex); in vmw_fence_obj_add_action()
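
In vmw_fence_obj_add_action(), the signaled check and the queueing happen under the same spinlock; done separately, an action could be attached to a fence that had already signaled and would then never run. The goal interrupt is enabled at most once, under goal_irq_mutex. A sketch, continuing the reduced types above (the driver defers the already-signaled case through a local action list rather than calling the hook inline):

        /* Hypothetical; the driver calls vmw_goal_waiter_add() here. */
        static void goal_irq_enable_sketch(struct fman_sketch *fman) { }

        static void add_action_sketch(struct fman_sketch *fman,
                                      struct fence_sketch *fence,
                                      struct action_sketch *action)
        {
                mutex_lock(&fman->goal_irq_mutex);
                spin_lock(&fman->lock);

                if (dma_fence_is_signaled_locked(&fence->base)) {
                        /* Raced with signaling: run the action now. */
                        if (action->seq_passed)
                                action->seq_passed(action);
                } else {
                        list_add_tail(&action->head,
                                      &fence->seq_passed_actions);
                }

                spin_unlock(&fman->lock);

                /* First action anywhere turns the goal irq on, once. */
                if (!fman->goal_irq_on) {
                        fman->goal_irq_on = true;
                        goal_irq_enable_sketch(fman);
                }
                mutex_unlock(&fman->goal_irq_mutex);
        }
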
919 * vmw_event_fence_action_queue - Post an event for sending when a fence
926 * @tv_sec: If non-null, the variable pointed to will be assigned
949 return -ENOMEM; in vmw_event_fence_action_queue()
951 eaction->event = event; in vmw_event_fence_action_queue()
953 eaction->action.seq_passed = vmw_event_fence_action_seq_passed; in vmw_event_fence_action_queue()
954 eaction->action.cleanup = vmw_event_fence_action_cleanup; in vmw_event_fence_action_queue()
955 eaction->action.type = VMW_ACTION_EVENT; in vmw_event_fence_action_queue()
957 eaction->fence = vmw_fence_obj_reference(fence); in vmw_event_fence_action_queue()
958 eaction->dev = &fman->dev_priv->drm; in vmw_event_fence_action_queue()
959 eaction->tv_sec = tv_sec; in vmw_event_fence_action_queue()
960 eaction->tv_usec = tv_usec; in vmw_event_fence_action_queue()
962 vmw_fence_obj_add_action(fence, &eaction->action); in vmw_event_fence_action_queue()
980 struct drm_device *dev = &fman->dev_priv->drm; in vmw_event_fence_action_create()
986 ret = -ENOMEM; in vmw_event_fence_action_create()
990 event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; in vmw_event_fence_action_create()
991 event->event.base.length = sizeof(event->event); in vmw_event_fence_action_create()
992 event->event.user_data = user_data; in vmw_event_fence_action_create()
994 ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); in vmw_event_fence_action_create()
1004 &event->base, in vmw_event_fence_action_create()
1005 &event->event.tv_sec, in vmw_event_fence_action_create()
1006 &event->event.tv_usec, in vmw_event_fence_action_create()
1010 &event->base, in vmw_event_fence_action_create()
1020 drm_event_cancel_free(dev, &event->base); in vmw_event_fence_action_create()
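
vmw_event_fence_action_create() shows the DRM pending-event lifecycle: drm_event_reserve_init() charges the event against the file's event space and ties it to file_priv, and if the fence action cannot be queued afterwards, drm_event_cancel_free() both uncharges and frees it. A reduced sketch with a hypothetical queue step:

        #include <drm/drm_device.h>
        #include <drm/drm_file.h>
        #include <linux/slab.h>

        struct fence_event_sketch {
                struct drm_pending_event base;
                struct drm_event event;   /* header-only payload, for brevity */
        };

        /* Hypothetical: would wrap the event in a fence action and queue it. */
        static int queue_event_sketch(struct fence_event_sketch *event)
        {
                return 0;
        }

        static int create_event_sketch(struct drm_device *dev,
                                       struct drm_file *file_priv, u32 type)
        {
                struct fence_event_sketch *event;
                int ret;

                event = kzalloc(sizeof(*event), GFP_KERNEL);
                if (!event)
                        return -ENOMEM;

                event->event.type = type;   /* a driver-private event type */
                event->event.length = sizeof(event->event);

                /* Charges file_priv's event space; links base to event. */
                ret = drm_event_reserve_init(dev, file_priv, &event->base,
                                             &event->event);
                if (ret) {
                        kfree(event);   /* never reserved: plain free */
                        return ret;
                }

                ret = queue_event_sketch(event);
                if (ret) {
                        /* Reserved but never sent: uncharge and free. */
                        drm_event_cancel_free(dev, &event->base);
                        return ret;
                }
                return 0;
        }
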
1033 struct ttm_object_file *tfile = vmw_fp->tfile; in vmw_fence_event_ioctl()
1036 arg->fence_rep; in vmw_fence_event_ioctl()
1042 * and if user-space wants a new reference, in vmw_fence_event_ioctl()
1045 if (arg->handle) { in vmw_fence_event_ioctl()
1047 vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_event_ioctl()
1053 base)->fence); in vmw_fence_event_ioctl()
1057 ret = ttm_ref_object_add(vmw_fp->tfile, base, in vmw_fence_event_ioctl()
1064 handle = base->handle; in vmw_fence_event_ioctl()
1086 arg->flags, in vmw_fence_event_ioctl()
1087 arg->user_data, in vmw_fence_event_ioctl()
1090 if (ret != -ERESTARTSYS) in vmw_fence_event_ioctl()
1096 handle, -1); in vmw_fence_event_ioctl()