/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>
#include <linux/string_helpers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

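/*
 * Test fixture: an i915_active wrapped with a kref so the tests control its
 * lifetime, plus a flag recording whether the retire callback has run.
 */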
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

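/* kref helpers managing the fixture's lifetime */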
static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

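/* i915_active "active" callback: grab a reference while the tracker is busy */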
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

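/* i915_active "retire" callback: note the retirement and drop our reference */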
static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

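/* Allocate a fixture with its i915_active callbacks wired up */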
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire, 0);

	return active;
}

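/*
 * Queue one kernel request per engine, all gated on a single sw_fence so
 * nothing is submitted until the fence is committed, and track each request
 * in the fixture's i915_active. Check that the tracker has not retired early
 * and that it accounts for every request before handing it back.
 */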
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

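/*
 * Track requests on every engine, then wait on the i915_active itself and
 * check that the retire callback has fired by the time the wait returns.
 */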
static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(&i915->drm, __func__);

		drm_printf(&p, "i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

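/*
 * As above, but never wait on the i915_active directly; flushing the GPU
 * must be enough to retire the tracked requests and trigger the callback.
 */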
static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(&i915->drm, __func__);

		drm_printf(&p, "i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	return err;
}

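/*
 * Exercise the barrier interface: install a preallocated barrier for each
 * engine and check that the i915_active retires once they are flushed.
 */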
static int live_active_barrier(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_active *active;
	int err = 0;

	/* Check that we get a retire callback once the engine barriers are flushed */

	active = __live_alloc(i915);
	if (!active)
		return -ENOMEM;

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		err = i915_active_acquire_preallocate_barrier(&active->base,
							      engine);
		if (err)
			break;

		i915_active_acquire_barrier(&active->base);
	}

	i915_active_release(&active->base);
	if (err)
		goto out;

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after flushing barriers!\n");
		err = -EINVAL;
	}

out:
	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

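/* Live selftest entry point; skipped (returns 0) if the GPU is already wedged */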
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
		SUBTEST(live_active_barrier),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_subtests(tests, i915);
}

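/*
 * Return the engine for a barrier node, or NULL for an ordinary tracker.
 * The barrier status is re-checked after the read barrier in case the node
 * was concurrently claimed by add_active_barriers().
 */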
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (!is_barrier(&it->base))
		return NULL;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return NULL;

	return engine;
}

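/* Debug aid: dump the state of an i915_active and its tracked timelines */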
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   str_yes_no(!llist_empty(&ref->preallocated_barriers)));

	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct intel_engine_cs *engine;

			engine = node_to_barrier(it);
			if (engine) {
				drm_printf(m, "\tbarrier: %s\n", engine->name);
				continue;
			}

			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
				continue;
			}
		}

		i915_active_release(ref);
	}
}

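/* Acquire and release the lock to wait out any concurrent critical section */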
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}

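/*
 * Clear a tracked fence slot: steal the (already signaled) fence, unlink our
 * callback from its cb_list and drop the corresponding count on @ref.
 */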
static void active_flush(struct i915_active *ref,
			 struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = xchg(__active_fence_slot(active), NULL);
	if (!fence)
		return;

	spin_lock_irq(fence->lock);
	__list_del_entry(&active->cb.node);
	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
	atomic_dec(&ref->count);

	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}

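/*
 * Selftest aid: flush the callbacks of every tracked fence, then wait for
 * any retirement still in flight, whether run under the tree_lock or
 * deferred to the worker.
 */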
void i915_active_unlock_wait(struct i915_active *ref)
{
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		/* Wait for all active callbacks */
		rcu_read_lock();
		active_flush(ref, &ref->excl);
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
			active_flush(ref, &it->base);
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	spin_unlock_wait(&ref->tree_lock);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}