Lines Matching full:active
18 * Active refs memory management
21 * they idle (when we know the active requests are inactive) and allocate the
36 node_from_active(struct i915_active_fence *active) in node_from_active() argument
38 return container_of(active, struct active_node, base); in node_from_active()
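node_from_active() is the standard container_of() idiom: from a pointer to the embedded i915_active_fence, recover the enclosing active_node. A minimal userspace sketch of the same idiom follows; the struct layout is illustrative, not the real i915 definition.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct i915_active_fence { void *fence; };

    struct active_node {
            struct i915_active_fence base;  /* embedded member */
            unsigned long long timeline;
    };

    static struct active_node *
    node_from_active(struct i915_active_fence *active)
    {
            return container_of(active, struct active_node, base);
    }

    int main(void)
    {
            struct active_node node = { .timeline = 42 };

            /* Recover &node from the pointer to its embedded member. */
            printf("timeline=%llu\n", node_from_active(&node.base)->timeline);
            return 0;
    }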
43 static inline bool is_barrier(const struct i915_active_fence *active) in is_barrier() argument
45 return IS_ERR(rcu_access_pointer(active->fence)); in is_barrier()
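is_barrier() relies on ERR_PTR tagging: an idle-barrier proto-node stores an error-encoded value in the fence slot instead of a real dma_fence pointer, so a single pointer test distinguishes the two. A sketch, with ERR_PTR()/IS_ERR() re-implemented for userspace (the kernel versions live in <linux/err.h>) and the RCU accessor elided:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline bool IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    struct i915_active_fence { void *fence; };

    static bool is_barrier(const struct i915_active_fence *active)
    {
            return IS_ERR(active->fence); /* rcu_access_pointer() elided */
    }

    int main(void)
    {
            struct i915_active_fence barrier = { .fence = ERR_PTR(-11) };
            struct i915_active_fence tracked = { .fence = &tracked };

            printf("barrier: %d, tracked: %d\n",
                   is_barrier(&barrier), is_barrier(&tracked));
            return 0;
    }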
79 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; in active_debug_hint()
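active_debug_hint() uses the GNU "a ?: b" extension, which evaluates to a when a is non-NULL and to b otherwise, so the debugobjects hint is the most identifying address available: the ->active callback if registered, else ->retire, else the ref itself. A compilable sketch (gcc/clang, since the extension is non-standard):

    #include <stdio.h>

    struct i915_active {
            int (*active)(struct i915_active *ref);
            void (*retire)(struct i915_active *ref);
    };

    static void *active_debug_hint(void *addr)
    {
            struct i915_active *ref = addr;

            return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
    }

    int main(void)
    {
            struct i915_active ref = { 0 };

            /* No callbacks registered: the hint falls back to ref itself. */
            printf("hint==ref: %d\n", active_debug_hint(&ref) == (void *)&ref);
            return 0;
    }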
204 __active_fence_slot(struct i915_active_fence *active) in __active_fence_slot() argument
206 return (struct dma_fence ** __force)&active->fence; in __active_fence_slot()
212 struct i915_active_fence *active = in active_fence_cb() local
213 container_of(cb, typeof(*active), cb); in active_fence_cb()
215 return cmpxchg(__active_fence_slot(active), fence, NULL) == fence; in active_fence_cb()
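active_fence_cb() is the signalling side of the tracker: when the fence completes, the callback clears the slot with cmpxchg() only if the slot still points at that fence, so a newer fence installed concurrently by __i915_active_fence_set() is left untouched. A sketch with C11 atomics standing in for cmpxchg(); the container_of() step from the embedded dma_fence_cb is skipped:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct dma_fence { int seqno; };

    struct i915_active_fence {
            _Atomic(struct dma_fence *) fence;
    };

    static bool active_fence_cb(struct i915_active_fence *active,
                                struct dma_fence *fence)
    {
            struct dma_fence *expected = fence;

            /* Succeeds, and clears the slot, only if we are still the
             * tracked fence; otherwise the newer value is left alone. */
            return atomic_compare_exchange_strong(&active->fence,
                                                  &expected, NULL);
    }

    int main(void)
    {
            struct dma_fence a = { .seqno = 1 }, b = { .seqno = 2 };
            struct i915_active_fence slot;

            atomic_store(&slot.fence, &a);
            printf("clear a: %d\n", active_fence_cb(&slot, &a)); /* 1 */

            atomic_store(&slot.fence, &b); /* a was replaced first */
            printf("clear a: %d\n", active_fence_cb(&slot, &a)); /* 0 */
            return 0;
    }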
269 /* While active, the tree can only be built; not destroyed */ in __active_lookup()
339 int (*active)(struct i915_active *ref), in __i915_active_init()
348 ref->active = active; in __i915_active_init()
414 replace_barrier(struct i915_active *ref, struct i915_active_fence *active) in replace_barrier() argument
416 if (!is_barrier(active)) /* proto-node used by our idle barrier? */ in replace_barrier()
424 return __active_del_barrier(ref, node_from_active(active)); in replace_barrier()
431 struct i915_active_fence *active; in i915_active_add_request() local
440 active = active_instance(ref, idx); in i915_active_add_request()
441 if (!active) { in i915_active_add_request()
446 if (replace_barrier(ref, active)) { in i915_active_add_request()
447 RCU_INIT_POINTER(active->fence, NULL); in i915_active_add_request()
450 } while (unlikely(is_barrier(active))); in i915_active_add_request()
452 fence = __i915_active_fence_set(active, fence); in i915_active_add_request()
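i915_active_add_request() may find the slot occupied by an idle-barrier proto-node; it then claims the barrier (replace_barrier()), resets the slot to NULL, and retries until the slot no longer reads as a barrier before installing the real fence. A control-flow sketch; claim_barrier() is a hypothetical stand-in for replace_barrier()/__active_del_barrier(), which in the kernel can fail while the barrier is being consumed, hence the loop:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p) ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

    struct i915_active_fence { void *fence; };

    static bool is_barrier(const struct i915_active_fence *active)
    {
            return IS_ERR(active->fence);
    }

    /* Hypothetical stand-in for replace_barrier(): pretend the barrier
     * node was unhooked from the engine's barrier list. */
    static bool claim_barrier(struct i915_active_fence *active)
    {
            return is_barrier(active);
    }

    static void add_request(struct i915_active_fence *active, void *fence)
    {
            do {
                    if (claim_barrier(active))
                            active->fence = NULL; /* RCU_INIT_POINTER() */
            } while (is_barrier(active));

            active->fence = fence; /* __i915_active_fence_set() */
    }

    int main(void)
    {
            /* Slot initially holds a barrier sentinel (ERR_PTR-style). */
            struct i915_active_fence slot = {
                    .fence = (void *)(uintptr_t)-11,
            };
            int rq;

            add_request(&slot, &rq);
            printf("fence installed: %d\n", slot.fence == (void *)&rq);
            return 0;
    }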
465 struct i915_active_fence *active, in __i915_active_set_fence() argument
470 if (replace_barrier(ref, active)) { in __i915_active_set_fence()
471 RCU_INIT_POINTER(active->fence, fence); in __i915_active_set_fence()
475 prev = __i915_active_fence_set(active, fence); in __i915_active_set_fence()
510 if (!ref->active) { in i915_active_acquire()
520 err = ref->active(ref); in i915_active_acquire()
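i915_active_acquire() runs the ref->active() hook, registered through __i915_active_init() (lines 339/348 above), once when the tracker transitions from idle to active; when no hook is set the locked slow path is skipped entirely. A simplified sketch of that first-acquire pattern; the kernel uses an atomic count plus a mutex rather than the plain int here:

    #include <stdio.h>

    struct i915_active {
            int count;
            int (*active)(struct i915_active *ref);
            void (*retire)(struct i915_active *ref);
    };

    static int i915_active_acquire(struct i915_active *ref)
    {
            if (ref->count++ == 0 && ref->active) {
                    int err = ref->active(ref);

                    if (err) {
                            ref->count--;
                            return err;
                    }
            }
            return 0;
    }

    static int grab_resources(struct i915_active *ref)
    {
            (void)ref;
            printf("first acquire: pin backing resources\n");
            return 0;
    }

    int main(void)
    {
            struct i915_active ref = { .active = grab_resources };

            i915_active_acquire(&ref); /* fires the hook */
            i915_active_acquire(&ref); /* already active: hook skipped */
            return 0;
    }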
532 struct i915_active_fence *active; in i915_active_acquire_for_context() local
539 active = active_instance(ref, idx); in i915_active_acquire_for_context()
540 if (!active) { in i915_active_acquire_for_context()
545 return 0; /* return with active ref */ in i915_active_acquire_for_context()
554 static void enable_signaling(struct i915_active_fence *active) in enable_signaling() argument
558 if (unlikely(is_barrier(active))) in enable_signaling()
561 fence = i915_active_fence_get(active); in enable_signaling()
620 * After the wait is complete, the caller may free the active. in __i915_active_wait()
627 static int __await_active(struct i915_active_fence *active, in __await_active() argument
633 if (is_barrier(active)) /* XXX flush the barrier? */ in __await_active()
636 fence = i915_active_fence_get(active); in __await_active()
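enable_signaling() and __await_active() share one idiom: barrier proto-nodes carry no real fence and are skipped (the XXX above notes that flushing them remains an open question), otherwise a reference is taken on the tracked fence before it is used. A sketch with a plain int refcount standing in for dma_fence_get_rcu()/dma_fence_put() under rcu_read_lock():

    #include <stdbool.h>
    #include <stdio.h>

    struct dma_fence { int refcount; int seqno; };

    struct i915_active_fence {
            struct dma_fence *fence;
            bool barrier;
    };

    static struct dma_fence *
    i915_active_fence_get(struct i915_active_fence *active)
    {
            struct dma_fence *fence = active->fence;

            if (fence)
                    fence->refcount++; /* dma_fence_get_rcu() in the kernel */
            return fence;
    }

    static int await_active(struct i915_active_fence *active)
    {
            struct dma_fence *fence;

            if (active->barrier) /* is_barrier(): no real fence to wait on */
                    return 0;

            fence = i915_active_fence_get(active);
            if (!fence)
                    return 0; /* already idle */

            printf("awaiting fence %d\n", fence->seqno);
            fence->refcount--; /* dma_fence_put() */
            return 0;
    }

    int main(void)
    {
            struct dma_fence f = { .refcount = 1, .seqno = 7 };
            struct i915_active_fence active = { .fence = &f };

            return await_active(&active);
    }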
785 * i915_active, due to overlapping active phases there is likely a in reuse_idle_barrier()
1015 * __i915_active_fence_set: Update the last active fence along its timeline
1016 * @active: the active tracker
1019 * Records the new @fence as the last active fence along its timeline in
1020 * this active tracker, moving the tracking callbacks from the previous
1028 __i915_active_fence_set(struct i915_active_fence *active, in __i915_active_fence_set() argument
1039 * while tracked under a different active tracker. Combined with i915 in __i915_active_fence_set()
1044 * As a countermeasure, we try to get a reference to the active->fence in __i915_active_fence_set()
1049 prev = i915_active_fence_get(active); in __i915_active_fence_set()
1057 * C already resident as the active->fence. in __i915_active_fence_set()
1076 * active->fence. Meanwhile, B follows the same path as A. in __i915_active_fence_set()
1078 * active->fence, locks it as soon as A completes, and possibly in __i915_active_fence_set()
1081 while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) { in __i915_active_fence_set()
1088 prev = i915_active_fence_get(active); in __i915_active_fence_set()
1109 __list_del_entry(&active->cb.node); in __i915_active_fence_set()
1112 list_add_tail(&active->cb.node, &fence->cb_list); in __i915_active_fence_set()
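The loop at line 1081 is the heart of __i915_active_fence_set(): snapshot the currently tracked fence, then cmpxchg() the new fence into the slot only if the snapshot still matches, re-reading and retrying otherwise. The long comment above (lines 1039-1078) explains why prev must first be acquired with i915_active_fence_get(): a fence that signals and is reused mid-update (the A/B/C scenario) must not be mistaken for the current value. A sketch of just the publication loop, with C11 atomics for cmpxchg() and the refcounting, locking, and callback-list splicing (lines 1109/1112) omitted:

    #include <stdatomic.h>
    #include <stdio.h>

    struct dma_fence { int seqno; };

    struct i915_active_fence {
            _Atomic(struct dma_fence *) fence;
    };

    /* Returns the fence that was previously tracked (which the new fence
     * must be ordered after), or NULL if the tracker was idle. */
    static struct dma_fence *
    active_fence_set(struct i915_active_fence *active, struct dma_fence *fence)
    {
            struct dma_fence *prev = atomic_load(&active->fence);

            /* A concurrent setter, or the signalling callback clearing
             * the slot, may change it between the load and the exchange;
             * on failure prev is reloaded and the swap is retried. */
            while (!atomic_compare_exchange_weak(&active->fence, &prev, fence))
                    ;

            return prev;
    }

    int main(void)
    {
            struct dma_fence a = { .seqno = 1 }, b = { .seqno = 2 };
            struct i915_active_fence slot;

            atomic_store(&slot.fence, NULL);
            printf("was idle: %d\n", active_fence_set(&slot, &a) == NULL);
            printf("prev seqno: %d\n", active_fence_set(&slot, &b)->seqno);
            return 0;
    }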
1118 int i915_active_fence_set(struct i915_active_fence *active, in i915_active_fence_set() argument
1124 /* Must maintain timeline ordering wrt previous active requests */ in i915_active_fence_set()
1125 fence = __i915_active_fence_set(active, &rq->fence); in i915_active_fence_set()
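i915_active_fence_set() wraps the exchange for requests: whatever fence it displaces becomes an ordering dependency of the new request, which is what the comment at line 1124 means by maintaining timeline ordering. A sketch under that reading; exchange() is a hypothetical, non-atomic stand-in for __i915_active_fence_set(), and the await step is modelled by a printf rather than a real dependency:

    #include <stdio.h>

    struct dma_fence { int seqno; };
    struct i915_request { struct dma_fence fence; };

    /* Hypothetical, non-atomic stand-in for __i915_active_fence_set(). */
    static struct dma_fence *
    exchange(struct dma_fence **slot, struct dma_fence *fence)
    {
            struct dma_fence *prev = *slot;

            *slot = fence;
            return prev;
    }

    static int active_fence_set(struct dma_fence **slot, struct i915_request *rq)
    {
            struct dma_fence *prev = exchange(slot, &rq->fence);

            if (prev) /* order the new request after the displaced fence */
                    printf("request %d awaits fence %d\n",
                           rq->fence.seqno, prev->seqno);
            return 0;
    }

    int main(void)
    {
            struct dma_fence old = { .seqno = 1 };
            struct dma_fence *slot = &old;
            struct i915_request rq = { .fence = { .seqno = 2 } };

            return active_fence_set(&slot, &rq);
    }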