1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects the entities
30  * from the run queue using a FIFO. The scheduler provides dependency handling
31  * features among jobs. The driver is supposed to provide callback functions for
32  * backend operations to the scheduler, like submitting a job to the hardware run
33  * queue, returning the dependencies of a job, etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  *
46  * Note that once a job has been taken from the entity's queue and pushed to the
47  * hardware, i.e. the pending queue, the entity must not be referenced anymore
48  * through the job's entity pointer.
49  */
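
/*
 * A minimal usage sketch of how the pieces described above fit together,
 * assuming a hypothetical "foo" driver (none of the foo_* names below exist
 * in the tree):
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 *
 *	// One scheduler per hardware run queue, one run queue per priority
 *	// level, a credit limit of 64 and a 500 ms job timeout.
 *	drm_sched_init(&foo->sched, &foo_sched_ops, NULL,
 *		       DRM_SCHED_PRIORITY_COUNT, 64, 2,
 *		       msecs_to_jiffies(500), NULL, NULL, "foo", foo->dev);
 *
 *	// One entity per userspace context / software queue.
 *	struct drm_gpu_scheduler *sched_list[] = { &foo->sched };
 *
 *	drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, 1, NULL);
 *
 *	// Per submission: init, arm and push the job.
 *	drm_sched_job_init(&job->base, &ctx->entity, 1, ctx);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */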
50 
51 /**
52  * DOC: Flow Control
53  *
54  * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
55  * at which the jobs fetched from scheduler entities are executed.
56  *
57  * In this context the &drm_gpu_scheduler keeps track of a driver specified
58  * credit limit representing the capacity of this scheduler and a credit count;
59  * every &drm_sched_job carries a driver specified number of credits.
60  *
61  * Once a job is executed (but not yet finished), the job's credits contribute
62  * to the scheduler's credit count until the job is finished. If by executing
63  * one more job the scheduler's credit count would exceed the scheduler's
64  * credit limit, the job won't be executed. Instead, the scheduler will wait
65  * until the credit count has decreased enough to not overflow its credit limit.
66  * This implies waiting for previously executed jobs.
67  *
68  * Optionally, drivers may register a callback (update_job_credits) provided by
69  * struct drm_sched_backend_ops to update the job's credits dynamically. The
70  * scheduler executes this callback every time the scheduler considers a job for
71  * execution and subsequently checks whether the job fits the scheduler's credit
72  * limit.
73  */
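
/*
 * A sketch of such a callback, again with hypothetical foo_* names: the
 * driver re-evaluates how much ring space the job still needs right before
 * the scheduler checks the job against its credit limit.
 *
 *	static u32 foo_update_job_credits(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = to_foo_job(sched_job);
 *
 *		// Must never return 0; zero-credit jobs bypass flow control.
 *		return max_t(u32, job->remaining_ring_dwords, 1);
 *	}
 */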
74 
75 #include <linux/wait.h>
76 #include <linux/sched.h>
77 #include <linux/completion.h>
78 #include <linux/dma-resv.h>
79 #include <uapi/linux/sched/types.h>
80 
81 #include <drm/drm_print.h>
82 #include <drm/drm_gem.h>
83 #include <drm/drm_syncobj.h>
84 #include <drm/gpu_scheduler.h>
85 #include <drm/spsc_queue.h>
86 
87 #define CREATE_TRACE_POINTS
88 #include "gpu_scheduler_trace.h"
89 
90 #ifdef CONFIG_LOCKDEP
91 static struct lockdep_map drm_sched_lockdep_map = {
92 	.name = "drm_sched_lockdep_map"
93 };
94 #endif
95 
96 #define to_drm_sched_job(sched_job)		\
97 		container_of((sched_job), struct drm_sched_job, queue_node)
98 
99 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
100 
101 /**
102  * DOC: sched_policy (int)
103  * Used to override the default entity scheduling policy in a run queue.
104  */
105 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
106 module_param_named(sched_policy, drm_sched_policy, int, 0444);
107 
108 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
109 {
110 	u32 credits;
111 
112 	drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
113 					      atomic_read(&sched->credit_count),
114 					      &credits));
115 
116 	return credits;
117 }
118 
119 /**
120  * drm_sched_can_queue -- Can we queue more to the hardware?
121  * @sched: scheduler instance
122  * @entity: the scheduler entity
123  *
124  * Return true if we can push at least one more job from @entity, false
125  * otherwise.
126  */
127 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
128 				struct drm_sched_entity *entity)
129 {
130 	struct drm_sched_job *s_job;
131 
132 	s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
133 	if (!s_job)
134 		return false;
135 
136 	if (sched->ops->update_job_credits) {
137 		s_job->credits = sched->ops->update_job_credits(s_job);
138 
139 		drm_WARN(sched, !s_job->credits,
140 			 "Jobs with zero credits bypass job-flow control.\n");
141 	}
142 
143 	/* If a job exceeds the credit limit, truncate it to the credit limit
144 	 * itself to guarantee forward progress.
145 	 */
146 	if (drm_WARN(sched, s_job->credits > sched->credit_limit,
147 		     "Jobs may not exceed the credit limit, truncate.\n"))
148 		s_job->credits = sched->credit_limit;
149 
150 	return drm_sched_available_credits(sched) >= s_job->credits;
151 }
152 
153 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
154 							    const struct rb_node *b)
155 {
156 	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
157 	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
158 
159 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
160 }
161 
162 static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
163 {
164 	struct drm_sched_rq *rq = entity->rq;
165 
166 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
167 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
168 		RB_CLEAR_NODE(&entity->rb_tree_node);
169 	}
170 }
171 
172 void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
173 {
174 	/*
175 	 * Both locks need to be grabbed, one to protect from entity->rq change
176 	 * for entity from within concurrent drm_sched_entity_select_rq and the
177 	 * other to update the rb tree structure.
178 	 */
179 	spin_lock(&entity->rq_lock);
180 	spin_lock(&entity->rq->lock);
181 
182 	drm_sched_rq_remove_fifo_locked(entity);
183 
184 	entity->oldest_job_waiting = ts;
185 
186 	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
187 		      drm_sched_entity_compare_before);
188 
189 	spin_unlock(&entity->rq->lock);
190 	spin_unlock(&entity->rq_lock);
191 }
192 
193 /**
194  * drm_sched_rq_init - initialize a given run queue struct
195  *
196  * @sched: scheduler instance to associate with this run queue
197  * @rq: scheduler run queue
198  *
199  * Initializes a scheduler runqueue.
200  */
201 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
202 			      struct drm_sched_rq *rq)
203 {
204 	spin_lock_init(&rq->lock);
205 	INIT_LIST_HEAD(&rq->entities);
206 	rq->rb_tree_root = RB_ROOT_CACHED;
207 	rq->current_entity = NULL;
208 	rq->sched = sched;
209 }
210 
211 /**
212  * drm_sched_rq_add_entity - add an entity
213  *
214  * @rq: scheduler run queue
215  * @entity: scheduler entity
216  *
217  * Adds a scheduler entity to the run queue.
218  */
219 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
220 			     struct drm_sched_entity *entity)
221 {
222 	if (!list_empty(&entity->list))
223 		return;
224 
225 	spin_lock(&rq->lock);
226 
227 	atomic_inc(rq->sched->score);
228 	list_add_tail(&entity->list, &rq->entities);
229 
230 	spin_unlock(&rq->lock);
231 }
232 
233 /**
234  * drm_sched_rq_remove_entity - remove an entity
235  *
236  * @rq: scheduler run queue
237  * @entity: scheduler entity
238  *
239  * Removes a scheduler entity from the run queue.
240  */
241 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
242 				struct drm_sched_entity *entity)
243 {
244 	if (list_empty(&entity->list))
245 		return;
246 
247 	spin_lock(&rq->lock);
248 
249 	atomic_dec(rq->sched->score);
250 	list_del_init(&entity->list);
251 
252 	if (rq->current_entity == entity)
253 		rq->current_entity = NULL;
254 
255 	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
256 		drm_sched_rq_remove_fifo_locked(entity);
257 
258 	spin_unlock(&rq->lock);
259 }
260 
261 /**
262  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
263  *
264  * @sched: the gpu scheduler
265  * @rq: scheduler run queue to check.
266  *
267  * Try to find the next ready entity.
268  *
269  * Return an entity if one is found; return an error-pointer (!NULL) if an
270  * entity was ready, but the scheduler had insufficient credits to accommodate
271  * its job; return NULL if no ready entity was found.
272  */
273 static struct drm_sched_entity *
274 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
275 			      struct drm_sched_rq *rq)
276 {
277 	struct drm_sched_entity *entity;
278 
279 	spin_lock(&rq->lock);
280 
281 	entity = rq->current_entity;
282 	if (entity) {
283 		list_for_each_entry_continue(entity, &rq->entities, list) {
284 			if (drm_sched_entity_is_ready(entity)) {
285 				/* If we can't queue yet, preserve the current
286 				 * entity in terms of fairness.
287 				 */
288 				if (!drm_sched_can_queue(sched, entity)) {
289 					spin_unlock(&rq->lock);
290 					return ERR_PTR(-ENOSPC);
291 				}
292 
293 				rq->current_entity = entity;
294 				reinit_completion(&entity->entity_idle);
295 				spin_unlock(&rq->lock);
296 				return entity;
297 			}
298 		}
299 	}
300 
301 	list_for_each_entry(entity, &rq->entities, list) {
302 		if (drm_sched_entity_is_ready(entity)) {
303 			/* If we can't queue yet, preserve the current entity in
304 			 * terms of fairness.
305 			 */
306 			if (!drm_sched_can_queue(sched, entity)) {
307 				spin_unlock(&rq->lock);
308 				return ERR_PTR(-ENOSPC);
309 			}
310 
311 			rq->current_entity = entity;
312 			reinit_completion(&entity->entity_idle);
313 			spin_unlock(&rq->lock);
314 			return entity;
315 		}
316 
317 		if (entity == rq->current_entity)
318 			break;
319 	}
320 
321 	spin_unlock(&rq->lock);
322 
323 	return NULL;
324 }
325 
326 /**
327  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
328  *
329  * @sched: the gpu scheduler
330  * @rq: scheduler run queue to check.
331  *
332  * Find oldest waiting ready entity.
333  *
334  * Return an entity if one is found; return an error-pointer (!NULL) if an
335  * entity was ready, but the scheduler had insufficient credits to accommodate
336  * its job; return NULL if no ready entity was found.
337  */
338 static struct drm_sched_entity *
339 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
340 				struct drm_sched_rq *rq)
341 {
342 	struct rb_node *rb;
343 
344 	spin_lock(&rq->lock);
345 	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
346 		struct drm_sched_entity *entity;
347 
348 		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
349 		if (drm_sched_entity_is_ready(entity)) {
350 			/* If we can't queue yet, preserve the current entity in
351 			 * terms of fairness.
352 			 */
353 			if (!drm_sched_can_queue(sched, entity)) {
354 				spin_unlock(&rq->lock);
355 				return ERR_PTR(-ENOSPC);
356 			}
357 
358 			rq->current_entity = entity;
359 			reinit_completion(&entity->entity_idle);
360 			break;
361 		}
362 	}
363 	spin_unlock(&rq->lock);
364 
365 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
366 }
367 
368 /**
369  * drm_sched_run_job_queue - enqueue run-job work
370  * @sched: scheduler instance
371  */
372 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
373 {
374 	if (!READ_ONCE(sched->pause_submit))
375 		queue_work(sched->submit_wq, &sched->work_run_job);
376 }
377 
378 /**
379  * __drm_sched_run_free_queue - enqueue free-job work
380  * @sched: scheduler instance
381  */
382 static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
383 {
384 	if (!READ_ONCE(sched->pause_submit))
385 		queue_work(sched->submit_wq, &sched->work_free_job);
386 }
387 
388 /**
389  * drm_sched_run_free_queue - enqueue free-job work if ready
390  * @sched: scheduler instance
391  */
392 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
393 {
394 	struct drm_sched_job *job;
395 
396 	spin_lock(&sched->job_list_lock);
397 	job = list_first_entry_or_null(&sched->pending_list,
398 				       struct drm_sched_job, list);
399 	if (job && dma_fence_is_signaled(&job->s_fence->finished))
400 		__drm_sched_run_free_queue(sched);
401 	spin_unlock(&sched->job_list_lock);
402 }
403 
404 /**
405  * drm_sched_job_done - complete a job
406  * @s_job: pointer to the job which is done
 * @result: the result of running the job, i.e. 0 or a negative error code,
 *	    which gets set on the job's finished fence
407  *
408  * Finish the job's fence and wake up the worker thread.
409  */
410 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
411 {
412 	struct drm_sched_fence *s_fence = s_job->s_fence;
413 	struct drm_gpu_scheduler *sched = s_fence->sched;
414 
415 	atomic_sub(s_job->credits, &sched->credit_count);
416 	atomic_dec(sched->score);
417 
418 	trace_drm_sched_process_job(s_fence);
419 
420 	dma_fence_get(&s_fence->finished);
421 	drm_sched_fence_finished(s_fence, result);
422 	dma_fence_put(&s_fence->finished);
423 	__drm_sched_run_free_queue(sched);
424 }
425 
426 /**
427  * drm_sched_job_done_cb - the callback for a done job
428  * @f: fence
429  * @cb: fence callbacks
430  */
431 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
432 {
433 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
434 
435 	drm_sched_job_done(s_job, f->error);
436 }
437 
438 /**
439  * drm_sched_start_timeout - start timeout for reset worker
440  *
441  * @sched: scheduler instance to start the worker for
442  *
443  * Start the timeout for the given scheduler.
444  */
445 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
446 {
447 	lockdep_assert_held(&sched->job_list_lock);
448 
449 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
450 	    !list_empty(&sched->pending_list))
451 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
452 }
453 
454 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
455 {
456 	spin_lock(&sched->job_list_lock);
457 	drm_sched_start_timeout(sched);
458 	spin_unlock(&sched->job_list_lock);
459 }
460 
461 /**
462  * drm_sched_tdr_queue_imm - immediately start job timeout handler
463  *
464  * @sched: scheduler for which the timeout handling should be started.
465  *
466  * Start timeout handling immediately for the named scheduler.
467  */
468 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
469 {
470 	spin_lock(&sched->job_list_lock);
471 	sched->timeout = 0;
472 	drm_sched_start_timeout(sched);
473 	spin_unlock(&sched->job_list_lock);
474 }
475 EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
476 
477 /**
478  * drm_sched_fault - immediately start timeout handler
479  *
480  * @sched: scheduler where the timeout handling should be started.
481  *
482  * Start timeout handling immediately when the driver detects a hardware fault.
483  */
484 void drm_sched_fault(struct drm_gpu_scheduler *sched)
485 {
486 	if (sched->timeout_wq)
487 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
488 }
489 EXPORT_SYMBOL(drm_sched_fault);
490 
491 /**
492  * drm_sched_suspend_timeout - Suspend scheduler job timeout
493  *
494  * @sched: scheduler instance for which to suspend the timeout
495  *
496  * Suspend the delayed work timeout for the scheduler. This is done by
497  * modifying the delayed work timeout to an arbitrarily large value,
498  * MAX_SCHEDULE_TIMEOUT in this case.
499  *
500  * Returns the timeout remaining
501  *
502  */
503 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
504 {
505 	unsigned long sched_timeout, now = jiffies;
506 
507 	sched_timeout = sched->work_tdr.timer.expires;
508 
509 	/*
510 	 * Modify the timeout to an arbitrarily large value. This also prevents
511 	 * the timeout to be restarted when new submissions arrive
512 	 */
513 	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
514 			&& time_after(sched_timeout, now))
515 		return sched_timeout - now;
516 	else
517 		return sched->timeout;
518 }
519 EXPORT_SYMBOL(drm_sched_suspend_timeout);
520 
521 /**
522  * drm_sched_resume_timeout - Resume scheduler job timeout
523  *
524  * @sched: scheduler instance for which to resume the timeout
525  * @remaining: remaining timeout
526  *
527  * Resume the delayed work timeout for the scheduler.
528  */
529 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
530 		unsigned long remaining)
531 {
532 	spin_lock(&sched->job_list_lock);
533 
534 	if (list_empty(&sched->pending_list))
535 		cancel_delayed_work(&sched->work_tdr);
536 	else
537 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
538 
539 	spin_unlock(&sched->job_list_lock);
540 }
541 EXPORT_SYMBOL(drm_sched_resume_timeout);
542 
543 static void drm_sched_job_begin(struct drm_sched_job *s_job)
544 {
545 	struct drm_gpu_scheduler *sched = s_job->sched;
546 
547 	spin_lock(&sched->job_list_lock);
548 	list_add_tail(&s_job->list, &sched->pending_list);
549 	drm_sched_start_timeout(sched);
550 	spin_unlock(&sched->job_list_lock);
551 }
552 
553 static void drm_sched_job_timedout(struct work_struct *work)
554 {
555 	struct drm_gpu_scheduler *sched;
556 	struct drm_sched_job *job;
557 	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
558 
559 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
560 
561 	/* Protects against concurrent deletion in drm_sched_get_finished_job */
562 	spin_lock(&sched->job_list_lock);
563 	job = list_first_entry_or_null(&sched->pending_list,
564 				       struct drm_sched_job, list);
565 
566 	if (job) {
567 		/*
568 		 * Remove the bad job so it cannot be freed by concurrent
569 		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
570 		 * is parked at which point it's safe.
571 		 */
572 		list_del_init(&job->list);
573 		spin_unlock(&sched->job_list_lock);
574 
575 		status = job->sched->ops->timedout_job(job);
576 
577 		/*
578 		 * Guilty job did complete and hence needs to be manually removed
579 		 * See drm_sched_stop doc.
580 		 */
581 		if (sched->free_guilty) {
582 			job->sched->ops->free_job(job);
583 			sched->free_guilty = false;
584 		}
585 	} else {
586 		spin_unlock(&sched->job_list_lock);
587 	}
588 
589 	if (status != DRM_GPU_SCHED_STAT_ENODEV)
590 		drm_sched_start_timeout_unlocked(sched);
591 }
592 
593 /**
594  * drm_sched_stop - stop the scheduler
595  *
596  * @sched: scheduler instance
597  * @bad: job which caused the time out
598  *
599  * Stop the scheduler and also removes and frees all completed jobs.
600  * Note: bad job will not be freed as it might be used later and so it's the
601  * caller's responsibility to release it manually if it's not part of the
602  * pending list any more.
603  *
604  */
605 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
606 {
607 	struct drm_sched_job *s_job, *tmp;
608 
609 	drm_sched_wqueue_stop(sched);
610 
611 	/*
612 	 * Reinsert back the bad job here - now it's safe as
613 	 * drm_sched_get_finished_job cannot race against us and release the
614 	 * bad job at this point - we parked (waited for) any in progress
615 	 * (earlier) cleanups and drm_sched_get_finished_job will not be called
616 	 * now until the scheduler thread is unparked.
617 	 */
618 	if (bad && bad->sched == sched)
619 		/*
620 		 * Add at the head of the queue to reflect it was the earliest
621 		 * job extracted.
622 		 */
623 		list_add(&bad->list, &sched->pending_list);
624 
625 	/*
626 	 * Iterate the job list from later to earlier and either deactivate
627 	 * their HW callbacks or remove them from the pending list if they already
628 	 * signaled.
629 	 * This iteration is thread safe as sched thread is stopped.
630 	 */
631 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
632 					 list) {
633 		if (s_job->s_fence->parent &&
634 		    dma_fence_remove_callback(s_job->s_fence->parent,
635 					      &s_job->cb)) {
636 			dma_fence_put(s_job->s_fence->parent);
637 			s_job->s_fence->parent = NULL;
638 			atomic_sub(s_job->credits, &sched->credit_count);
639 		} else {
640 			/*
641 			 * remove job from pending_list.
642 			 * Locking here is for concurrent resume timeout
643 			 */
644 			spin_lock(&sched->job_list_lock);
645 			list_del_init(&s_job->list);
646 			spin_unlock(&sched->job_list_lock);
647 
648 			/*
649 			 * Wait for job's HW fence callback to finish using s_job
650 			 * before releasing it.
651 			 *
652 			 * Job is still alive so fence refcount at least 1
653 			 */
654 			dma_fence_wait(&s_job->s_fence->finished, false);
655 
656 			/*
657 			 * We must keep bad job alive for later use during
658 			 * recovery by some of the drivers but leave a hint
659 			 * that the guilty job must be released.
660 			 */
661 			if (bad != s_job)
662 				sched->ops->free_job(s_job);
663 			else
664 				sched->free_guilty = true;
665 		}
666 	}
667 
668 	/*
669 	 * Stop pending timer in flight as we rearm it in  drm_sched_start. This
670 	 * avoids the pending timeout work in progress to fire right away after
671 	 * this TDR finished and before the newly restarted jobs had a
672 	 * chance to complete.
673 	 */
674 	cancel_delayed_work(&sched->work_tdr);
675 }
676 
677 EXPORT_SYMBOL(drm_sched_stop);
678 
679 /**
680  * drm_sched_start - recover jobs after a reset
681  *
682  * @sched: scheduler instance
683  *
684  */
685 void drm_sched_start(struct drm_gpu_scheduler *sched)
686 {
687 	struct drm_sched_job *s_job, *tmp;
688 
689 	/*
690 	 * Locking the list is not required here as the sched thread is parked
691 	 * so no new jobs are being inserted or removed. Also concurrent
692 	 * GPU recovers can't run in parallel.
693 	 */
694 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
695 		struct dma_fence *fence = s_job->s_fence->parent;
696 
697 		atomic_add(s_job->credits, &sched->credit_count);
698 
699 		if (!fence) {
700 			drm_sched_job_done(s_job, -ECANCELED);
701 			continue;
702 		}
703 
704 		if (dma_fence_add_callback(fence, &s_job->cb,
705 					   drm_sched_job_done_cb))
706 			drm_sched_job_done(s_job, fence->error);
707 	}
708 
709 	drm_sched_start_timeout_unlocked(sched);
710 	drm_sched_wqueue_start(sched);
711 }
712 EXPORT_SYMBOL(drm_sched_start);
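
/*
 * A rough sketch of how drm_sched_stop() and drm_sched_start() are typically
 * paired around a hardware reset in a driver's
 * &drm_sched_backend_ops.timedout_job callback; the foo_* names are
 * hypothetical and error handling is omitted:
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		drm_sched_increase_karma(sched_job);
 *		foo_hw_reset(sched);		// driver-specific recovery
 *		drm_sched_start(sched);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */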
713 
714 /**
715  * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
716  *
717  * @sched: scheduler instance
718  *
719  * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
720  * recovery after a job timeout.
721  *
722  * This turned out to not work very well. First of all, there are many
723  * problems with the dma_fence implementation and requirements. Either the
724  * implementation is risking deadlocks with core memory management or violating
725  * documented implementation details of the dma_fence object.
726  *
727  * Drivers can still save and restore their state for recovery operations, but
728  * we shouldn't make this a general scheduler feature around the dma_fence
729  * interface.
730  */
731 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
732 {
733 	struct drm_sched_job *s_job, *tmp;
734 	uint64_t guilty_context;
735 	bool found_guilty = false;
736 	struct dma_fence *fence;
737 
738 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
739 		struct drm_sched_fence *s_fence = s_job->s_fence;
740 
741 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
742 			found_guilty = true;
743 			guilty_context = s_job->s_fence->scheduled.context;
744 		}
745 
746 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
747 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
748 
749 		fence = sched->ops->run_job(s_job);
750 
751 		if (IS_ERR_OR_NULL(fence)) {
752 			if (IS_ERR(fence))
753 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
754 
755 			s_job->s_fence->parent = NULL;
756 		} else {
757 
758 			s_job->s_fence->parent = dma_fence_get(fence);
759 
760 			/* Drop for original kref_init */
761 			dma_fence_put(fence);
762 		}
763 	}
764 }
765 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
766 
767 /**
768  * drm_sched_job_init - init a scheduler job
769  * @job: scheduler job to init
770  * @entity: scheduler entity to use
771  * @credits: the number of credits this job contributes to the schedulers
772  * credit limit
773  * @owner: job owner for debugging
774  *
775  * Refer to drm_sched_entity_push_job() documentation
776  * for locking considerations.
777  *
778  * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
779  * successfully, even when @job is aborted before drm_sched_job_arm() is called.
780  *
781  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
782  * has died, which can mean that there's no valid runqueue for an @entity.
783  * This function returns -ENOENT in this case (which probably should be -EIO as
784  * a more meaningful return value).
785  *
786  * Returns 0 for success, negative error code otherwise.
787  */
788 int drm_sched_job_init(struct drm_sched_job *job,
789 		       struct drm_sched_entity *entity,
790 		       u32 credits, void *owner)
791 {
792 	if (!entity->rq) {
793 		/* This will most likely be followed by missing frames
794 		 * or worse--a blank screen--leave a trail in the
795 		 * logs, so this can be debugged more easily.
796 		 */
797 		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
798 		return -ENOENT;
799 	}
800 
801 	if (unlikely(!credits)) {
802 		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
803 		return -EINVAL;
804 	}
805 
806 	job->entity = entity;
807 	job->credits = credits;
808 	job->s_fence = drm_sched_fence_alloc(entity, owner);
809 	if (!job->s_fence)
810 		return -ENOMEM;
811 
812 	INIT_LIST_HEAD(&job->list);
813 
814 	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
815 
816 	return 0;
817 }
818 EXPORT_SYMBOL(drm_sched_job_init);
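
/*
 * A short sketch of the init/cleanup pairing described above, with
 * hypothetical foo_* names: once drm_sched_job_init() has succeeded, any
 * failure before drm_sched_job_arm() must be unwound with
 * drm_sched_job_cleanup().
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, 1, ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = foo_lock_and_add_dependencies(job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */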
819 
820 /**
821  * drm_sched_job_arm - arm a scheduler job for execution
822  * @job: scheduler job to arm
823  *
824  * This arms a scheduler job for execution. Specifically it initializes the
825  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
826  * or other places that need to track the completion of this job.
827  *
828  * Refer to drm_sched_entity_push_job() documentation for locking
829  * considerations.
830  *
831  * This can only be called if drm_sched_job_init() succeeded.
832  */
833 void drm_sched_job_arm(struct drm_sched_job *job)
834 {
835 	struct drm_gpu_scheduler *sched;
836 	struct drm_sched_entity *entity = job->entity;
837 
838 	BUG_ON(!entity);
839 	drm_sched_entity_select_rq(entity);
840 	sched = entity->rq->sched;
841 
842 	job->sched = sched;
843 	job->s_priority = entity->priority;
844 	job->id = atomic64_inc_return(&sched->job_id_count);
845 
846 	drm_sched_fence_init(job->s_fence, job->entity);
847 }
848 EXPORT_SYMBOL(drm_sched_job_arm);
849 
850 /**
851  * drm_sched_job_add_dependency - adds the fence as a job dependency
852  * @job: scheduler job to add the dependencies to
853  * @fence: the dma_fence to add to the list of dependencies.
854  *
855  * Note that @fence is consumed in both the success and error cases.
856  *
857  * Returns:
858  * 0 on success, or an error on failing to expand the array.
859  */
860 int drm_sched_job_add_dependency(struct drm_sched_job *job,
861 				 struct dma_fence *fence)
862 {
863 	struct dma_fence *entry;
864 	unsigned long index;
865 	u32 id = 0;
866 	int ret;
867 
868 	if (!fence)
869 		return 0;
870 
871 	/* Deduplicate if we already depend on a fence from the same context.
872 	 * This lets the size of the array of deps scale with the number of
873 	 * engines involved, rather than the number of BOs.
874 	 */
875 	xa_for_each(&job->dependencies, index, entry) {
876 		if (entry->context != fence->context)
877 			continue;
878 
879 		if (dma_fence_is_later(fence, entry)) {
880 			dma_fence_put(entry);
881 			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
882 		} else {
883 			dma_fence_put(fence);
884 		}
885 		return 0;
886 	}
887 
888 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
889 	if (ret != 0)
890 		dma_fence_put(fence);
891 
892 	return ret;
893 }
894 EXPORT_SYMBOL(drm_sched_job_add_dependency);
895 
896 /**
897  * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
898  * @job: scheduler job to add the dependencies to
899  * @file: drm file private pointer
900  * @handle: syncobj handle to lookup
901  * @point: timeline point
902  *
903  * This adds the fence matching the given syncobj to @job.
904  *
905  * Returns:
906  * 0 on success, or an error on failing to expand the array.
907  */
908 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
909 					 struct drm_file *file,
910 					 u32 handle,
911 					 u32 point)
912 {
913 	struct dma_fence *fence;
914 	int ret;
915 
916 	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
917 	if (ret)
918 		return ret;
919 
920 	return drm_sched_job_add_dependency(job, fence);
921 }
922 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
923 
924 /**
925  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
926  * @job: scheduler job to add the dependencies to
927  * @resv: the dma_resv object to get the fences from
928  * @usage: the dma_resv_usage to use to filter the fences
929  *
930  * This adds all fences matching the given usage from @resv to @job.
931  * Must be called with the @resv lock held.
932  *
933  * Returns:
934  * 0 on success, or an error on failing to expand the array.
935  */
936 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
937 					struct dma_resv *resv,
938 					enum dma_resv_usage usage)
939 {
940 	struct dma_resv_iter cursor;
941 	struct dma_fence *fence;
942 	int ret;
943 
944 	dma_resv_assert_held(resv);
945 
946 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
947 		/* Make sure to grab an additional ref on the added fence */
948 		dma_fence_get(fence);
949 		ret = drm_sched_job_add_dependency(job, fence);
950 		if (ret) {
951 			dma_fence_put(fence);
952 			return ret;
953 		}
954 	}
955 	return 0;
956 }
957 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
958 
959 /**
960  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
961  *   dependencies
962  * @job: scheduler job to add the dependencies to
963  * @obj: the gem object to add new dependencies from.
964  * @write: whether the job might write the object (so we need to depend on
965  * shared fences in the reservation object).
966  *
967  * This should be called after drm_gem_lock_reservations() on your array of
968  * GEM objects used in the job but before updating the reservations with your
969  * own fences.
970  *
971  * Returns:
972  * 0 on success, or an error on failing to expand the array.
973  */
974 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
975 					    struct drm_gem_object *obj,
976 					    bool write)
977 {
978 	return drm_sched_job_add_resv_dependencies(job, obj->resv,
979 						   dma_resv_usage_rw(write));
980 }
981 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
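
/*
 * A sketch of how the dependency helpers above are commonly used in a
 * driver's submit path (hypothetical names, error paths shortened): lock the
 * GEM objects, pull in their implicit fences, then arm and push the job.
 *
 *	ret = drm_gem_lock_reservations(objs, nr_objs, &ticket);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < nr_objs; i++) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      objs[i],
 *							      writes[i]);
 *		if (ret)
 *			goto out_unlock;
 *	}
 */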
982 
983 /**
984  * drm_sched_job_cleanup - clean up scheduler job resources
985  * @job: scheduler job to clean up
986  *
987  * Cleans up the resources allocated with drm_sched_job_init().
988  *
989  * Drivers should call this from their error unwind code if @job is aborted
990  * before drm_sched_job_arm() is called.
991  *
992  * After that point of no return @job is committed to be executed by the
993  * scheduler, and this function should be called from the
994  * &drm_sched_backend_ops.free_job callback.
995  */
996 void drm_sched_job_cleanup(struct drm_sched_job *job)
997 {
998 	struct dma_fence *fence;
999 	unsigned long index;
1000 
1001 	if (kref_read(&job->s_fence->finished.refcount)) {
1002 		/* drm_sched_job_arm() has been called */
1003 		dma_fence_put(&job->s_fence->finished);
1004 	} else {
1005 		/* aborted job before committing to run it */
1006 		drm_sched_fence_free(job->s_fence);
1007 	}
1008 
1009 	job->s_fence = NULL;
1010 
1011 	xa_for_each(&job->dependencies, index, fence) {
1012 		dma_fence_put(fence);
1013 	}
1014 	xa_destroy(&job->dependencies);
1015 
1016 }
1017 EXPORT_SYMBOL(drm_sched_job_cleanup);
1018 
1019 /**
1020  * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1021  * @sched: scheduler instance
1022  *
1023  * Wake up the scheduler if we can queue jobs.
1024  */
1025 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1026 {
1027 	drm_sched_run_job_queue(sched);
1028 }
1029 
1030 /**
1031  * drm_sched_select_entity - Select next entity to process
1032  *
1033  * @sched: scheduler instance
1034  *
1035  * Return an entity to process or NULL if none are found.
1036  *
1037  * Note that we break out of the for-loop when "entity" is non-NULL, which can
1038  * also be an error pointer--this ensures we don't process lower-priority
1039  * run-queues. See the comments in the respective selection functions.
1040  */
1041 static struct drm_sched_entity *
1042 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1043 {
1044 	struct drm_sched_entity *entity;
1045 	int i;
1046 
1047 	/* Start with the highest priority.
1048 	 */
1049 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1050 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1051 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1052 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1053 		if (entity)
1054 			break;
1055 	}
1056 
1057 	return IS_ERR(entity) ? NULL : entity;
1058 }
1059 
1060 /**
1061  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1062  *
1063  * @sched: scheduler instance
1064  *
1065  * Returns the next finished job from the pending list (if there is one)
1066  * ready for it to be destroyed.
1067  */
1068 static struct drm_sched_job *
1069 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
1070 {
1071 	struct drm_sched_job *job, *next;
1072 
1073 	spin_lock(&sched->job_list_lock);
1074 
1075 	job = list_first_entry_or_null(&sched->pending_list,
1076 				       struct drm_sched_job, list);
1077 
1078 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1079 		/* remove job from pending_list */
1080 		list_del_init(&job->list);
1081 
1082 		/* cancel this job's TO timer */
1083 		cancel_delayed_work(&sched->work_tdr);
1084 		/* make the scheduled timestamp more accurate */
1085 		next = list_first_entry_or_null(&sched->pending_list,
1086 						typeof(*next), list);
1087 
1088 		if (next) {
1089 			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1090 				     &next->s_fence->scheduled.flags))
1091 				next->s_fence->scheduled.timestamp =
1092 					dma_fence_timestamp(&job->s_fence->finished);
1093 			/* start TO timer for next job */
1094 			drm_sched_start_timeout(sched);
1095 		}
1096 	} else {
1097 		job = NULL;
1098 	}
1099 
1100 	spin_unlock(&sched->job_list_lock);
1101 
1102 	return job;
1103 }
1104 
1105 /**
1106  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1107  * @sched_list: list of drm_gpu_schedulers
1108  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1109  *
1110  * Returns a pointer to the sched with the least load, or NULL if none of the
1111  * drm_gpu_schedulers are ready.
1112  */
1113 struct drm_gpu_scheduler *
1114 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1115 		     unsigned int num_sched_list)
1116 {
1117 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1118 	int i;
1119 	unsigned int min_score = UINT_MAX, num_score;
1120 
1121 	for (i = 0; i < num_sched_list; ++i) {
1122 		sched = sched_list[i];
1123 
1124 		if (!sched->ready) {
1125 			DRM_WARN("scheduler %s is not ready, skipping",
1126 				 sched->name);
1127 			continue;
1128 		}
1129 
1130 		num_score = atomic_read(sched->score);
1131 		if (num_score < min_score) {
1132 			min_score = num_score;
1133 			picked_sched = sched;
1134 		}
1135 	}
1136 
1137 	return picked_sched;
1138 }
1139 EXPORT_SYMBOL(drm_sched_pick_best);
1140 
1141 /**
1142  * drm_sched_free_job_work - worker to call free_job
1143  *
1144  * @w: free job work
1145  */
1146 static void drm_sched_free_job_work(struct work_struct *w)
1147 {
1148 	struct drm_gpu_scheduler *sched =
1149 		container_of(w, struct drm_gpu_scheduler, work_free_job);
1150 	struct drm_sched_job *job;
1151 
1152 	if (READ_ONCE(sched->pause_submit))
1153 		return;
1154 
1155 	job = drm_sched_get_finished_job(sched);
1156 	if (job)
1157 		sched->ops->free_job(job);
1158 
1159 	drm_sched_run_free_queue(sched);
1160 	drm_sched_run_job_queue(sched);
1161 }
1162 
1163 /**
1164  * drm_sched_run_job_work - worker to call run_job
1165  *
1166  * @w: run job work
1167  */
1168 static void drm_sched_run_job_work(struct work_struct *w)
1169 {
1170 	struct drm_gpu_scheduler *sched =
1171 		container_of(w, struct drm_gpu_scheduler, work_run_job);
1172 	struct drm_sched_entity *entity;
1173 	struct dma_fence *fence;
1174 	struct drm_sched_fence *s_fence;
1175 	struct drm_sched_job *sched_job;
1176 	int r;
1177 
1178 	if (READ_ONCE(sched->pause_submit))
1179 		return;
1180 
1181 	/* Find entity with a ready job */
1182 	entity = drm_sched_select_entity(sched);
1183 	if (!entity)
1184 		return;	/* No more work */
1185 
1186 	sched_job = drm_sched_entity_pop_job(entity);
1187 	if (!sched_job) {
1188 		complete_all(&entity->entity_idle);
1189 		drm_sched_run_job_queue(sched);
1190 		return;
1191 	}
1192 
1193 	s_fence = sched_job->s_fence;
1194 
1195 	atomic_add(sched_job->credits, &sched->credit_count);
1196 	drm_sched_job_begin(sched_job);
1197 
1198 	trace_drm_run_job(sched_job, entity);
1199 	fence = sched->ops->run_job(sched_job);
1200 	complete_all(&entity->entity_idle);
1201 	drm_sched_fence_scheduled(s_fence, fence);
1202 
1203 	if (!IS_ERR_OR_NULL(fence)) {
1204 		/* Drop for original kref_init of the fence */
1205 		dma_fence_put(fence);
1206 
1207 		r = dma_fence_add_callback(fence, &sched_job->cb,
1208 					   drm_sched_job_done_cb);
1209 		if (r == -ENOENT)
1210 			drm_sched_job_done(sched_job, fence->error);
1211 		else if (r)
1212 			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1213 	} else {
1214 		drm_sched_job_done(sched_job, IS_ERR(fence) ?
1215 				   PTR_ERR(fence) : 0);
1216 	}
1217 
1218 	wake_up(&sched->job_scheduled);
1219 	drm_sched_run_job_queue(sched);
1220 }
1221 
1222 /**
1223  * drm_sched_init - Init a gpu scheduler instance
1224  *
1225  * @sched: scheduler instance
1226  * @ops: backend operations for this scheduler
1227  * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
1228  *	       allocated and used
1229  * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
1230  * @credit_limit: the number of credits this scheduler can hold from all jobs
1231  * @hang_limit: number of times to allow a job to hang before dropping it
1232  * @timeout: timeout value in jiffies for the scheduler
1233  * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
1234  *		used
1235  * @score: optional score atomic shared with other schedulers
1236  * @name: name used for debugging
1237  * @dev: target &struct device
1238  *
1239  * Return 0 on success, otherwise error code.
1240  */
1241 int drm_sched_init(struct drm_gpu_scheduler *sched,
1242 		   const struct drm_sched_backend_ops *ops,
1243 		   struct workqueue_struct *submit_wq,
1244 		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
1245 		   long timeout, struct workqueue_struct *timeout_wq,
1246 		   atomic_t *score, const char *name, struct device *dev)
1247 {
1248 	int i;
1249 
1250 	sched->ops = ops;
1251 	sched->credit_limit = credit_limit;
1252 	sched->name = name;
1253 	sched->timeout = timeout;
1254 	sched->timeout_wq = timeout_wq ? : system_wq;
1255 	sched->hang_limit = hang_limit;
1256 	sched->score = score ? score : &sched->_score;
1257 	sched->dev = dev;
1258 
1259 	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
1260 		/* This is a gross violation--tell drivers what the problem is.
1261 		 */
1262 		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1263 			__func__);
1264 		return -EINVAL;
1265 	} else if (sched->sched_rq) {
1266 		/* Not an error, but warn anyway so drivers can
1267 		 * fine-tune their DRM calling order, and return all
1268 		 * is good.
1269 		 */
1270 		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
1271 		return 0;
1272 	}
1273 
1274 	if (submit_wq) {
1275 		sched->submit_wq = submit_wq;
1276 		sched->own_submit_wq = false;
1277 	} else {
1278 #ifdef CONFIG_LOCKDEP
1279 		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
1280 								       WQ_MEM_RECLAIM,
1281 								       &drm_sched_lockdep_map);
1282 #else
1283 		sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
1284 #endif
1285 		if (!sched->submit_wq)
1286 			return -ENOMEM;
1287 
1288 		sched->own_submit_wq = true;
1289 	}
1290 
1291 	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
1292 					GFP_KERNEL | __GFP_ZERO);
1293 	if (!sched->sched_rq)
1294 		goto Out_check_own;
1295 	sched->num_rqs = num_rqs;
1296 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1297 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1298 		if (!sched->sched_rq[i])
1299 			goto Out_unroll;
1300 		drm_sched_rq_init(sched, sched->sched_rq[i]);
1301 	}
1302 
1303 	init_waitqueue_head(&sched->job_scheduled);
1304 	INIT_LIST_HEAD(&sched->pending_list);
1305 	spin_lock_init(&sched->job_list_lock);
1306 	atomic_set(&sched->credit_count, 0);
1307 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1308 	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1309 	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1310 	atomic_set(&sched->_score, 0);
1311 	atomic64_set(&sched->job_id_count, 0);
1312 	sched->pause_submit = false;
1313 
1314 	sched->ready = true;
1315 	return 0;
1316 Out_unroll:
1317 	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1318 		kfree(sched->sched_rq[i]);
1319 
1320 	kfree(sched->sched_rq);
1321 	sched->sched_rq = NULL;
1322 Out_check_own:
1323 	if (sched->own_submit_wq)
1324 		destroy_workqueue(sched->submit_wq);
1325 	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1326 	return -ENOMEM;
1327 }
1328 EXPORT_SYMBOL(drm_sched_init);
1329 
1330 /**
1331  * drm_sched_fini - Destroy a gpu scheduler
1332  *
1333  * @sched: scheduler instance
1334  *
1335  * Tears down and cleans up the scheduler.
1336  */
1337 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1338 {
1339 	struct drm_sched_entity *s_entity;
1340 	int i;
1341 
1342 	drm_sched_wqueue_stop(sched);
1343 
1344 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1345 		struct drm_sched_rq *rq = sched->sched_rq[i];
1346 
1347 		spin_lock(&rq->lock);
1348 		list_for_each_entry(s_entity, &rq->entities, list)
1349 			/*
1350 			 * Prevents reinsertion and marks job_queue as idle;
1351 			 * it will be removed from the rq in drm_sched_entity_fini()
1352 			 * eventually.
1353 			 */
1354 			s_entity->stopped = true;
1355 		spin_unlock(&rq->lock);
1356 		kfree(sched->sched_rq[i]);
1357 	}
1358 
1359 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1360 	wake_up_all(&sched->job_scheduled);
1361 
1362 	/* Confirm no work left behind accessing device structures */
1363 	cancel_delayed_work_sync(&sched->work_tdr);
1364 
1365 	if (sched->own_submit_wq)
1366 		destroy_workqueue(sched->submit_wq);
1367 	sched->ready = false;
1368 	kfree(sched->sched_rq);
1369 	sched->sched_rq = NULL;
1370 }
1371 EXPORT_SYMBOL(drm_sched_fini);
1372 
1373 /**
1374  * drm_sched_increase_karma - Update sched_entity guilty flag
1375  *
1376  * @bad: The job guilty of time out
1377  *
1378  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1379  * limit of the scheduler then the respective sched entity is marked guilty and
1380  * jobs from it will not be scheduled further
1381  */
1382 void drm_sched_increase_karma(struct drm_sched_job *bad)
1383 {
1384 	int i;
1385 	struct drm_sched_entity *tmp;
1386 	struct drm_sched_entity *entity;
1387 	struct drm_gpu_scheduler *sched = bad->sched;
1388 
1389 	/* don't change @bad's karma if it's from the KERNEL RQ,
1390 	 * because sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
1391 	 * to be corrupted, but keep in mind that kernel jobs are always considered good.
1392 	 */
1393 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1394 		atomic_inc(&bad->karma);
1395 
1396 		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1397 			struct drm_sched_rq *rq = sched->sched_rq[i];
1398 
1399 			spin_lock(&rq->lock);
1400 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1401 				if (bad->s_fence->scheduled.context ==
1402 				    entity->fence_context) {
1403 					if (entity->guilty)
1404 						atomic_set(entity->guilty, 1);
1405 					break;
1406 				}
1407 			}
1408 			spin_unlock(&rq->lock);
1409 			if (&entity->list != &rq->entities)
1410 				break;
1411 		}
1412 	}
1413 }
1414 EXPORT_SYMBOL(drm_sched_increase_karma);
1415 
1416 /**
1417  * drm_sched_wqueue_ready - Is the scheduler ready for submission
1418  *
1419  * @sched: scheduler instance
1420  *
1421  * Returns true if submission is ready
1422  */
1423 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1424 {
1425 	return sched->ready;
1426 }
1427 EXPORT_SYMBOL(drm_sched_wqueue_ready);
1428 
1429 /**
1430  * drm_sched_wqueue_stop - stop scheduler submission
1431  *
1432  * @sched: scheduler instance
1433  */
1434 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1435 {
1436 	WRITE_ONCE(sched->pause_submit, true);
1437 	cancel_work_sync(&sched->work_run_job);
1438 	cancel_work_sync(&sched->work_free_job);
1439 }
1440 EXPORT_SYMBOL(drm_sched_wqueue_stop);
1441 
1442 /**
1443  * drm_sched_wqueue_start - start scheduler submission
1444  *
1445  * @sched: scheduler instance
1446  */
1447 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1448 {
1449 	WRITE_ONCE(sched->pause_submit, false);
1450 	queue_work(sched->submit_wq, &sched->work_run_job);
1451 	queue_work(sched->submit_wq, &sched->work_free_job);
1452 }
1453 EXPORT_SYMBOL(drm_sched_wqueue_start);
1454