/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL,
		 * so warn drivers not to do this and to fix their DRM
		 * calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
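
/*
 * Usage sketch (illustrative only, not built as part of this file): a driver
 * typically embeds a struct drm_sched_entity in its per-context state and
 * initializes it against one or more of its ring schedulers. The names
 * "my_ring" and "my_ctx" below are hypothetical driver structures.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */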

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->rq_lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
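
/*
 * Usage sketch (illustrative only): because drm_sched_entity_modify_sched()
 * must not race with new job submission, a driver would call it under the
 * same lock it holds around drm_sched_job_arm()/drm_sched_entity_push_job().
 * The lock and the "new_scheds" array below are hypothetical driver state.
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	drm_sched_entity_modify_sched(&my_ctx->entity, new_scheds,
 *				      num_new_scheds);
 *	mutex_unlock(&my_ctx->submit_lock);
 */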

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
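
/*
 * Usage sketch (illustrative only): a driver can use this to reject further
 * submissions on a context whose previous job failed, for example early in
 * its submit path. "my_ctx" is a hypothetical per-context structure.
 *
 *	int r = drm_sched_entity_error(&my_ctx->entity);
 *
 *	if (r)
 *		return r;
 */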

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * Splitting drm_sched_entity_fini() into two functions, the first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume existing
	 * queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disable any more IB enqueues right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
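
/*
 * Usage sketch (illustrative only): drivers typically call this from their
 * file-descriptor teardown path so a dying process drains or drops its queued
 * jobs before the entity is finally freed with drm_sched_entity_fini().
 * "my_ctx" is hypothetical.
 *
 *	long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 *
 *	timeout = drm_sched_entity_flush(&my_ctx->entity, timeout);
 */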

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
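
/*
 * Usage sketch (illustrative only): for the common case where flush and fini
 * happen at the same place, a driver can simply pair drm_sched_entity_init()
 * with this wrapper when its context is released. "my_ctx" is hypothetical.
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */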

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
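
/*
 * Usage sketch (illustrative only): a driver implementing a per-context
 * priority control, for example via an ioctl, could map its userspace level
 * to a drm_sched_priority and apply it here. "my_ctx" is hypothetical.
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity,
 *				      DRM_SCHED_PRIORITY_HIGH);
 */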

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number, this function should be called with drm_sched_job_arm()
 * under the common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		drm_sched_rq_add_entity(rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
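
/*
 * Usage sketch (illustrative only): the typical submission path first
 * initializes and arms the job, then pushes it while holding the same lock
 * that orders submissions to this entity. The lock name is hypothetical, and
 * the exact drm_sched_job_init() parameters depend on the kernel version and
 * driver.
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 *	mutex_unlock(&my_ctx->submit_lock);
 */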