/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_H_
#define _I915_ACTIVE_H_

#include <linux/lockdep.h>

#include "i915_active_types.h"
#include "i915_request.h"

struct i915_request;
struct intel_engine_cs;
struct intel_timeline;

/*
 * We treat requests as fences. These are not to be confused with our
 * "fence registers", but are pipeline synchronisation objects a la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU; for example, we should not rewrite an object's PTEs
 * whilst the GPU is reading them. We also track fences at a higher level to
 * provide implicit synchronisation around GEM objects, e.g. set-domain will
 * wait for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_active_fence to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_active_fence is updated with i915_active_fence_set() to
 * track the most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_active_fence completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_active_fence.fence == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
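
/*
 * As a minimal sketch of the pattern above (struct frame and its
 * last_write field are hypothetical, purely for illustration):
 *
 *	struct frame {
 *		struct i915_active_fence last_write;
 *	};
 *
 *	INIT_ACTIVE_FENCE(&frame->last_write);
 *
 * Each time new work writing to the frame is submitted, the tracker is
 * pointed at the new request with i915_active_fence_set(), and a CPU user
 * may later wait upon the fence returned by i915_active_fence_get()
 * before touching the frame.
 */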

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);

/**
 * __i915_active_fence_init - prepares the activity tracker for use
 * @active: the active tracker
 * @fence: initial fence to track, can be NULL
 * @fn: a callback for when the tracker is retired (becomes idle),
 *	can be NULL
 *
 * __i915_active_fence_init() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active fence
 * associated with it. When the tracked fence becomes idle, i.e. when it is
 * retired after completion, the optional callback @fn is invoked.
 */
static inline void
__i915_active_fence_init(struct i915_active_fence *active,
			 void *fence,
			 dma_fence_func_t fn)
{
	RCU_INIT_POINTER(active->fence, fence);
	active->cb.func = fn ?: i915_active_noop;
}

#define INIT_ACTIVE_FENCE(A) \
	__i915_active_fence_init((A), NULL, NULL)
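
/*
 * To be notified when the tracked fence is retired, a dma_fence callback
 * may be supplied at init time. A sketch, assuming the tracker is embedded
 * in a hypothetical struct foo as foo->active:
 *
 *	static void foo_retired(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct foo *foo = container_of(cb, typeof(*foo), active.cb);
 *
 *		... the tracker is now idle ...
 *	}
 *
 *	__i915_active_fence_init(&foo->active, NULL, foo_retired);
 */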

struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence);

/**
 * i915_active_fence_set - updates the tracker to watch the current fence
 * @active: the active tracker
 * @rq: the request to watch
 *
 * i915_active_fence_set() watches the given @rq for completion. While
 * that @rq is busy, the @active reports busy. When that @rq is signaled
 * (or else retired) the @active tracker is updated to report idle.
 */
int __must_check
i915_active_fence_set(struct i915_active_fence *active,
		      struct i915_request *rq);
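
/*
 * The update can fail, so the return value must be checked, e.g.
 * (a sketch; obj->last_read is hypothetical):
 *
 *	err = i915_active_fence_set(&obj->last_read, rq);
 *	if (err)
 *		return err;
 */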

/**
 * i915_active_fence_get - return a reference to the active fence
 * @active: the active tracker
 *
 * i915_active_fence_get() returns a reference to the active fence,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with dma_fence_put().
 */
static inline struct dma_fence *
i915_active_fence_get(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&active->fence);
	rcu_read_unlock();

	return fence;
}
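
/*
 * The usual pattern is a get/wait/put sequence, e.g. (sketch only;
 * obj->last_write is hypothetical):
 *
 *	struct dma_fence *fence;
 *	long ret = 0;
 *
 *	fence = i915_active_fence_get(&obj->last_write);
 *	if (fence) {
 *		ret = dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *	}
 */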

/**
 * i915_active_fence_isset - report whether the active tracker is assigned
 * @active: the active tracker
 *
 * i915_active_fence_isset() returns true if the active tracker is currently
 * assigned to a fence. Due to the lazy retiring, that fence may be idle
 * and this may report stale information.
 */
static inline bool
i915_active_fence_isset(const struct i915_active_fence *active)
{
	return rcu_access_pointer(active->fence);
}

/*
 * GPU activity tracking
 *
 * Each set of commands submitted to the GPU comprises a single request that
 * signals a fence upon completion. struct i915_request combines the
 * command submission, scheduling and fence signaling roles. If we want to see
 * if a particular task is complete, we need to grab the fence (struct
 * i915_request) for that task and check or wait for it to be signaled. More
 * often though we want to track the status of a bunch of tasks, for example
 * to wait for the GPU to finish accessing some memory across a variety of
 * different command pipelines from different clients. We could choose to
 * track every single request associated with the task, but knowing that
 * each request belongs to an ordered timeline (later requests within a
 * timeline must wait for earlier requests), we need only track the
 * latest request in each timeline to determine the overall status of the
 * task.
 *
 * struct i915_active provides this tracking across timelines. It builds a
 * composite shared-fence, and is updated as new work is submitted to the task,
 * forming a snapshot of the current status. It should be embedded into the
 * different resources that need to track their associated GPU activity to
 * provide a callback when that GPU activity has ceased, or otherwise to
 * provide a serialisation point either for request submission or for CPU
 * synchronisation.
 */
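
/*
 * A rough sketch of the intended use, with a hypothetical struct foo
 * embedding the tracker:
 *
 *	struct foo {
 *		struct i915_active active;
 *	};
 *
 * As each new request using the resource is submitted, it is recorded with
 * i915_active_add_request(&foo->active, rq); once every tracked timeline
 * has idled, the optional retire callback supplied at init time is invoked.
 */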

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey);

/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
#define i915_active_init(ref, active, retire, flags) do {			\
	static struct lock_class_key __mkey;					\
	static struct lock_class_key __wkey;					\
										\
	__i915_active_init(ref, active, retire, flags, &__mkey, &__wkey);	\
} while (0)
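
/*
 * For example (a sketch; foo_active() and foo_retire() are hypothetical
 * callbacks, invoked on first use and on final idling respectively):
 *
 *	static int foo_active(struct i915_active *ref)
 *	{
 *		struct foo *foo = container_of(ref, typeof(*foo), active);
 *
 *		... grab a reference for the duration of the activity ...
 *		return 0;
 *	}
 *
 *	static void foo_retire(struct i915_active *ref)
 *	{
 *		struct foo *foo = container_of(ref, typeof(*foo), active);
 *
 *		... all tracked GPU activity has completed ...
 *	}
 *
 *	i915_active_init(&foo->active, foo_active, foo_retire, 0);
 */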

int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);

int __i915_active_wait(struct i915_active *ref, int state);
static inline int i915_active_wait(struct i915_active *ref)
{
	return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags);
int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags);
#define I915_ACTIVE_AWAIT_EXCL BIT(0)
#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
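
/*
 * For example, to order @rq behind both the exclusive fence and all other
 * fences tracked by @ref (sketch only):
 *
 *	err = i915_request_await_active(rq, ref,
 *					I915_ACTIVE_AWAIT_EXCL |
 *					I915_ACTIVE_AWAIT_ACTIVE);
 *	if (err)
 *		return err;
 */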

int i915_active_acquire(struct i915_active *ref);
int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);

void i915_active_release(struct i915_active *ref);

static inline void __i915_active_acquire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	atomic_inc(&ref->count);
}
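
/*
 * Acquiring pins the tracker, preventing it from retiring, and the
 * matching release allows it to idle again. A sketch of the usual bracket:
 *
 *	err = i915_active_acquire(ref);
 *	if (err)
 *		return err;
 *
 *	... ref is guaranteed not to retire here ...
 *
 *	i915_active_release(ref);
 */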

static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
	return !atomic_read(&ref->count);
}

void i915_active_fini(struct i915_active *ref);

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);

void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);

struct i915_active *i915_active_create(void);
struct i915_active *i915_active_get(struct i915_active *ref);
void i915_active_put(struct i915_active *ref);

/* Queue @rq to wait upon the exclusive fence tracked by @active, if any. */
static inline int __i915_request_await_exclusive(struct i915_request *rq,
						 struct i915_active *active)
{
	struct dma_fence *fence;
	int err = 0;

	fence = i915_active_fence_get(&active->excl);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_module_exit(void);
int i915_active_module_init(void);

#endif /* _I915_ACTIVE_H_ */