Lines matching refs: guc
49 return &q->gt->uc.guc; in exec_queue_to_guc()
72 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
77 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
82 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
87 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
92 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
97 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
102 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
107 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
112 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
117 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE; in exec_queue_pending_disable()
122 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in set_exec_queue_pending_disable()
127 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in clear_exec_queue_pending_disable()
132 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED; in exec_queue_destroyed()
137 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in set_exec_queue_destroyed()
142 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED; in exec_queue_banned()
147 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state); in set_exec_queue_banned()
152 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED; in exec_queue_suspended()
157 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in set_exec_queue_suspended()
162 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in clear_exec_queue_suspended()
167 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET; in exec_queue_reset()
172 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state); in set_exec_queue_reset()
177 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED; in exec_queue_killed()
182 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state); in set_exec_queue_killed()
187 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED; in exec_queue_wedged()
192 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state); in set_exec_queue_wedged()
197 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT; in exec_queue_check_timeout()
202 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in set_exec_queue_check_timeout()
207 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in clear_exec_queue_check_timeout()
212 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF; in exec_queue_extra_ref()
217 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); in set_exec_queue_extra_ref()
222 return (atomic_read(&q->guc->state) & in exec_queue_killed_or_banned_or_wedged()
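The helpers above (exec_queue_registered() through exec_queue_killed_or_banned_or_wedged()) implement the per-queue software state machine as bits in the single atomic word q->guc->state, read with atomic_read() and flipped with atomic_or()/atomic_and(~bit). A minimal userspace C11 sketch of the same flag pattern; the names and flag values here are illustrative, not the driver's:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative flags packed into one atomic word, as in q->guc->state. */
#define QUEUE_STATE_REGISTERED (1 << 0)
#define QUEUE_STATE_BANNED     (1 << 1)
#define QUEUE_STATE_KILLED     (1 << 2)

struct demo_queue {
	atomic_int state;
};

static bool queue_registered(struct demo_queue *q)
{
	return atomic_load(&q->state) & QUEUE_STATE_REGISTERED;
}

static void set_queue_registered(struct demo_queue *q)
{
	atomic_fetch_or(&q->state, QUEUE_STATE_REGISTERED);	/* atomic_or() analogue */
}

static void clear_queue_registered(struct demo_queue *q)
{
	atomic_fetch_and(&q->state, ~QUEUE_STATE_REGISTERED);	/* atomic_and(~bit) analogue */
}

static bool queue_killed_or_banned(struct demo_queue *q)
{
	/* Several flags can be tested with one load, as in the last helper above. */
	return atomic_load(&q->state) & (QUEUE_STATE_KILLED | QUEUE_STATE_BANNED);
}

int main(void)
{
	struct demo_queue q = { .state = 0 };

	set_queue_registered(&q);
	printf("registered=%d killed_or_banned=%d\n",
	       queue_registered(&q), queue_killed_or_banned(&q));
	clear_queue_registered(&q);
	printf("registered=%d\n", queue_registered(&q));
	return 0;
}
```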
229 struct xe_guc *guc = arg; in guc_submit_fini() local
231 xa_destroy(&guc->submission_state.exec_queue_lookup); in guc_submit_fini()
236 struct xe_guc *guc = arg; in guc_submit_wedged_fini() local
240 mutex_lock(&guc->submission_state.lock); in guc_submit_wedged_fini()
241 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in guc_submit_wedged_fini()
243 mutex_unlock(&guc->submission_state.lock); in guc_submit_wedged_fini()
245 mutex_lock(&guc->submission_state.lock); in guc_submit_wedged_fini()
248 mutex_unlock(&guc->submission_state.lock); in guc_submit_wedged_fini()
253 static void primelockdep(struct xe_guc *guc) in primelockdep() argument
260 mutex_lock(&guc->submission_state.lock); in primelockdep()
261 mutex_unlock(&guc->submission_state.lock); in primelockdep()
279 int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) in xe_guc_submit_init() argument
281 struct xe_device *xe = guc_to_xe(guc); in xe_guc_submit_init()
282 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_submit_init()
285 err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock); in xe_guc_submit_init()
289 err = xe_guc_id_mgr_init(&guc->submission_state.idm, num_ids); in xe_guc_submit_init()
295 xa_init(&guc->submission_state.exec_queue_lookup); in xe_guc_submit_init()
297 init_waitqueue_head(&guc->submission_state.fini_wq); in xe_guc_submit_init()
299 primelockdep(guc); in xe_guc_submit_init()
301 return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); in xe_guc_submit_init()
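xe_guc_submit_init() above leans on managed helpers (drmm_mutex_init(), and drmm_add_action_or_reset() registering guc_submit_fini) so that teardown is tied to the DRM device's lifetime instead of an explicit fini call. A loose userspace analogue of that "register cleanup at init" shape, using atexit(); the struct and all names are invented for illustration:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for guc->submission_state. */
struct demo_submit_state {
	pthread_mutex_t lock;
	int *exec_queue_lookup;		/* stands in for the xarray */
};

static struct demo_submit_state state;

static void demo_submit_fini(void)
{
	free(state.exec_queue_lookup);
	pthread_mutex_destroy(&state.lock);
}

static int demo_submit_init(unsigned int num_ids)
{
	if (pthread_mutex_init(&state.lock, NULL))
		return -1;

	state.exec_queue_lookup = calloc(num_ids, sizeof(int));
	if (!state.exec_queue_lookup) {
		pthread_mutex_destroy(&state.lock);
		return -1;
	}

	/* Register teardown once; callers never invoke a fini() themselves. */
	if (atexit(demo_submit_fini)) {
		demo_submit_fini();
		return -1;
	}
	return 0;
}

int main(void)
{
	return demo_submit_init(64) ? 1 : 0;
}
```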
304 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) in __release_guc_id() argument
308 lockdep_assert_held(&guc->submission_state.lock); in __release_guc_id()
311 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); in __release_guc_id()
313 xe_guc_id_mgr_release_locked(&guc->submission_state.idm, in __release_guc_id()
314 q->guc->id, q->width); in __release_guc_id()
316 if (xa_empty(&guc->submission_state.exec_queue_lookup)) in __release_guc_id()
317 wake_up(&guc->submission_state.fini_wq); in __release_guc_id()
320 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in alloc_guc_id() argument
332 lockdep_assert_held(&guc->submission_state.lock); in alloc_guc_id()
334 ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm, in alloc_guc_id()
339 q->guc->id = ret; in alloc_guc_id()
342 ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup, in alloc_guc_id()
343 q->guc->id + i, q, GFP_NOWAIT)); in alloc_guc_id()
351 __release_guc_id(guc, q, i); in alloc_guc_id()
356 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in release_guc_id() argument
358 mutex_lock(&guc->submission_state.lock); in release_guc_id()
359 __release_guc_id(guc, q, q->width); in release_guc_id()
360 mutex_unlock(&guc->submission_state.lock); in release_guc_id()
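alloc_guc_id() and __release_guc_id() above reserve q->width consecutive IDs from the ID manager under submission_state.lock, publish one lookup entry per ID in the xarray, and unwind only the entries already stored if a later xa_store() fails (while always handing back the full reserved range). A toy userspace sketch of that reserve/publish/rollback shape; the bitmap ID manager and the plain array are stand-ins, not the driver's data structures, and locking is omitted:

```c
#include <stddef.h>

#define NUM_IDS 64	/* width is assumed small (< 64) to keep the bitmap math simple */

struct demo_queue {
	int id;
	int width;
};

static struct demo_queue *lookup[NUM_IDS];	/* stands in for the xarray */
static unsigned long long id_bitmap;		/* stands in for the ID manager */

static int reserve_ids(int width)
{
	for (int base = 0; base + width <= NUM_IDS; base++) {
		unsigned long long mask = ((1ULL << width) - 1) << base;

		if (!(id_bitmap & mask)) {
			id_bitmap |= mask;
			return base;
		}
	}
	return -1;
}

static void release_ids(struct demo_queue *q, int published)
{
	/* Drop only the lookup entries that were actually published ... */
	for (int i = 0; i < published; i++)
		lookup[q->id + i] = NULL;
	/* ... but always return the whole reserved range to the ID manager. */
	id_bitmap &= ~(((1ULL << q->width) - 1) << q->id);
}

static int alloc_id(struct demo_queue *q)
{
	int base = reserve_ids(q->width);

	if (base < 0)
		return -1;

	q->id = base;
	for (int i = 0; i < q->width; i++) {
		/* A real store can fail (e.g. -ENOMEM); on error: release_ids(q, i). */
		lookup[base + i] = q;
	}
	return 0;
}

int main(void)
{
	struct demo_queue q = { .width = 4 };

	if (alloc_id(&q))
		return 1;
	release_ids(&q, q.width);
	return 0;
}
```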
411 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) in init_policies() argument
414 struct xe_device *xe = guc_to_xe(guc); in init_policies()
421 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in init_policies()
426 xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g, in init_policies()
430 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) in set_min_preemption_timeout() argument
434 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in set_min_preemption_timeout()
437 xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g, in set_min_preemption_timeout()
448 static void __register_mlrc_exec_queue(struct xe_guc *guc, in __register_mlrc_exec_queue() argument
453 struct xe_device *xe = guc_to_xe(guc); in __register_mlrc_exec_queue()
484 xe_guc_ct_send(&guc->ct, action, len, 0, 0); in __register_mlrc_exec_queue()
487 static void __register_exec_queue(struct xe_guc *guc, in __register_exec_queue() argument
505 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0); in __register_exec_queue()
510 struct xe_guc *guc = exec_queue_to_guc(q); in register_exec_queue() local
511 struct xe_device *xe = guc_to_xe(guc); in register_exec_queue()
518 info.context_idx = q->guc->id; in register_exec_queue()
539 q->guc->wqi_head = 0; in register_exec_queue()
540 q->guc->wqi_tail = 0; in register_exec_queue()
556 __register_mlrc_exec_queue(guc, q, &info); in register_exec_queue()
558 __register_exec_queue(guc, &info); in register_exec_queue()
559 init_policies(guc, q); in register_exec_queue()
564 return (WQ_SIZE - q->guc->wqi_tail); in wq_space_until_wrap()
569 struct xe_guc *guc = exec_queue_to_guc(q); in wq_wait_for_space() local
570 struct xe_device *xe = guc_to_xe(guc); in wq_wait_for_space()
575 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
578 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); in wq_wait_for_space()
597 struct xe_guc *guc = exec_queue_to_guc(q); in wq_noop_append() local
598 struct xe_device *xe = guc_to_xe(guc); in wq_noop_append()
607 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], in wq_noop_append()
610 q->guc->wqi_tail = 0; in wq_noop_append()
617 struct xe_guc *guc = exec_queue_to_guc(q); in wq_item_append() local
618 struct xe_device *xe = guc_to_xe(guc); in wq_item_append()
636 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | in wq_item_append()
648 wq[q->guc->wqi_tail / sizeof(u32)])); in wq_item_append()
650 q->guc->wqi_tail += wqi_size; in wq_item_append()
651 xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE); in wq_item_append()
656 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); in wq_item_append()
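wq_space_until_wrap(), wq_wait_for_space(), wq_noop_append() and wq_item_append() above treat the parallel-submission work queue as a byte ring: free space is computed CIRC_SPACE-style from the head the firmware advances and the tail the driver advances, an item that would straddle the end of the ring is preceded by NOOP padding up to the wrap point, and the new tail is then published in the shared descriptor. A compact userspace sketch of that ring arithmetic; the size and names are made up:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQ_SIZE 1024u			/* ring size in bytes, power of two */
#define WQ_MASK (WQ_SIZE - 1)

/* Free bytes in a circular buffer, same idea as the kernel's CIRC_SPACE(). */
static unsigned int circ_space(unsigned int tail, unsigned int head)
{
	return (head - tail - 1) & WQ_MASK;
}

struct demo_wq {
	uint8_t ring[WQ_SIZE];
	unsigned int head;	/* advanced by the consumer (the firmware) */
	unsigned int tail;	/* advanced by the producer (the driver)  */
};

static unsigned int space_until_wrap(struct demo_wq *wq)
{
	return WQ_SIZE - wq->tail;
}

static int wq_append(struct demo_wq *wq, const void *item, unsigned int size)
{
	unsigned int pad = 0;

	/* Pad to the end of the ring if the item would straddle the wrap. */
	if (space_until_wrap(wq) < size)
		pad = space_until_wrap(wq);

	if (circ_space(wq->tail, wq->head) < size + pad)
		return -1;		/* the caller would wait for the head to move */

	if (pad) {
		memset(&wq->ring[wq->tail], 0, pad);	/* stands in for a NOOP item */
		wq->tail = 0;
	}

	memcpy(&wq->ring[wq->tail], item, size);
	wq->tail = (wq->tail + size) & WQ_MASK;
	/* A real producer would now publish the new tail in the shared descriptor. */
	return 0;
}

int main(void)
{
	struct demo_wq wq = { .head = 0, .tail = 0 };
	uint32_t item[4] = { 0 };

	printf("append: %d, tail=%u\n", wq_append(&wq, item, sizeof(item)), wq.tail);
	return 0;
}
```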
662 struct xe_guc *guc = exec_queue_to_guc(q); in submit_exec_queue() local
663 struct xe_device *xe = guc_to_xe(guc); in submit_exec_queue()
683 action[len++] = q->guc->id; in submit_exec_queue()
690 q->guc->resume_time = RESUME_PENDING; in submit_exec_queue()
696 action[len++] = q->guc->id; in submit_exec_queue()
700 xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h); in submit_exec_queue()
705 action[len++] = q->guc->id; in submit_exec_queue()
708 xe_guc_ct_send(&guc->ct, action, len, 0, 0); in submit_exec_queue()
717 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_run_job() local
718 struct xe_device *xe = guc_to_xe(guc); in guc_exec_queue_run_job()
752 static int guc_read_stopped(struct xe_guc *guc) in guc_read_stopped() argument
754 return atomic_read(&guc->submission_state.stopped); in guc_read_stopped()
760 q->guc->id, \
764 static void disable_scheduling_deregister(struct xe_guc *guc, in disable_scheduling_deregister() argument
768 struct xe_device *xe = guc_to_xe(guc); in disable_scheduling_deregister()
771 set_min_preemption_timeout(guc, q); in disable_scheduling_deregister()
773 ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || in disable_scheduling_deregister()
774 guc_read_stopped(guc), HZ * 5); in disable_scheduling_deregister()
776 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister()
794 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), in disable_scheduling_deregister()
801 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_trigger_cleanup() local
802 struct xe_device *xe = guc_to_xe(guc); in xe_guc_exec_queue_trigger_cleanup()
808 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr); in xe_guc_exec_queue_trigger_cleanup()
810 xe_sched_tdr_queue_imm(&q->guc->sched); in xe_guc_exec_queue_trigger_cleanup()
820 void xe_guc_submit_wedge(struct xe_guc *guc) in xe_guc_submit_wedge() argument
822 struct xe_device *xe = guc_to_xe(guc); in xe_guc_submit_wedge()
827 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); in xe_guc_submit_wedge()
829 err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, in xe_guc_submit_wedge()
830 guc_submit_wedged_fini, guc); in xe_guc_submit_wedge()
836 mutex_lock(&guc->submission_state.lock); in xe_guc_submit_wedge()
837 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_wedge()
840 mutex_unlock(&guc->submission_state.lock); in xe_guc_submit_wedge()
843 static bool guc_submit_hint_wedged(struct xe_guc *guc) in guc_submit_hint_wedged() argument
845 struct xe_device *xe = guc_to_xe(guc); in guc_submit_hint_wedged()
863 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup() local
864 struct xe_device *xe = guc_to_xe(guc); in xe_guc_exec_queue_lr_cleanup()
888 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup() local
892 disable_scheduling_deregister(guc, q); in xe_guc_exec_queue_lr_cleanup()
898 ret = wait_event_timeout(guc->ct.wq, in xe_guc_exec_queue_lr_cleanup()
900 guc_read_stopped(guc), HZ * 5); in xe_guc_exec_queue_lr_cleanup()
925 q->guc->id); in check_timeout()
953 q->guc->id, running_time_ms, timeout_ms, diff); in check_timeout()
961 struct xe_guc *guc = exec_queue_to_guc(q); in enable_scheduling() local
964 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in enable_scheduling()
965 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in enable_scheduling()
966 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in enable_scheduling()
967 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in enable_scheduling()
973 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), in enable_scheduling()
976 ret = wait_event_timeout(guc->ct.wq, in enable_scheduling()
978 guc_read_stopped(guc), HZ * 5); in enable_scheduling()
979 if (!ret || guc_read_stopped(guc)) { in enable_scheduling()
980 xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond"); in enable_scheduling()
983 xe_sched_tdr_queue_imm(&q->guc->sched); in enable_scheduling()
990 struct xe_guc *guc = exec_queue_to_guc(q); in disable_scheduling() local
992 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in disable_scheduling()
993 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in disable_scheduling()
994 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in disable_scheduling()
997 set_min_preemption_timeout(guc, q); in disable_scheduling()
1002 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), in disable_scheduling()
1006 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in __deregister_exec_queue() argument
1010 q->guc->id, in __deregister_exec_queue()
1013 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in __deregister_exec_queue()
1014 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in __deregister_exec_queue()
1015 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in __deregister_exec_queue()
1016 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in __deregister_exec_queue()
1021 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), in __deregister_exec_queue()
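enable_scheduling(), disable_scheduling() and __deregister_exec_queue() above all follow the same handshake: set the matching pending bit, send the H2G action array over the CT channel, and let the G2H completion handler clear the bit and wake guc->ct.wq, with callers bounding the wait (HZ * 5) and escalating (warn, TDR) on timeout. A userspace sketch of that request/acknowledge shape, with a pthread mutex and condition variable standing in for the wait queue; every name here is invented:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static bool pending_enable;

/* Models the G2H "scheduling done" handler clearing the pending bit. */
static void *ack_thread(void *arg)
{
	(void)arg;
	usleep(100 * 1000);			/* pretend firmware latency */
	pthread_mutex_lock(&lock);
	pending_enable = false;
	pthread_cond_broadcast(&done);		/* wake_up_all() analogue */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Models enable_scheduling(): request, then a bounded wait for the ack. */
static int request_enable(void)
{
	struct timespec deadline;
	bool timed_out;
	pthread_t thr;
	int ret = 0;

	pthread_mutex_lock(&lock);
	pending_enable = true;			/* set_exec_queue_pending_enable() analogue */
	pthread_mutex_unlock(&lock);

	pthread_create(&thr, NULL, ack_thread, NULL);	/* "send the H2G" */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;			/* HZ * 5 analogue */

	pthread_mutex_lock(&lock);
	while (pending_enable && ret == 0)
		ret = pthread_cond_timedwait(&done, &lock, &deadline);
	timed_out = pending_enable;
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	return timed_out ? -1 : 0;		/* timeout -> warn and kick the TDR */
}

int main(void)
{
	printf("enable: %s\n", request_enable() ? "timed out" : "acked");
	return 0;
}
```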
1031 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job()
1032 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_timedout_job() local
1081 ret = wait_event_timeout(guc->ct.wq, in guc_exec_queue_timedout_job()
1083 guc_read_stopped(guc), HZ * 5); in guc_exec_queue_timedout_job()
1084 if (!ret || guc_read_stopped(guc)) in guc_exec_queue_timedout_job()
1106 ret = wait_event_timeout(guc->ct.wq, in guc_exec_queue_timedout_job()
1108 guc_read_stopped(guc), HZ * 5); in guc_exec_queue_timedout_job()
1109 if (!ret || guc_read_stopped(guc)) { in guc_exec_queue_timedout_job()
1112 xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond"); in guc_exec_queue_timedout_job()
1135 …xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [… in guc_exec_queue_timedout_job()
1137 q->guc->id, q->flags, process_name, pid); in guc_exec_queue_timedout_job()
1166 __deregister_exec_queue(guc, q); in guc_exec_queue_timedout_job()
1211 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_fini_async() local
1213 xe_pm_runtime_get(guc_to_xe(guc)); in __guc_exec_queue_fini_async()
1218 release_guc_id(guc, q); in __guc_exec_queue_fini_async()
1224 xe_pm_runtime_put(guc_to_xe(guc)); in __guc_exec_queue_fini_async()
1229 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_fini_async() local
1230 struct xe_device *xe = guc_to_xe(guc); in guc_exec_queue_fini_async()
1232 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); in guc_exec_queue_fini_async()
1236 __guc_exec_queue_fini_async(&q->guc->fini_async); in guc_exec_queue_fini_async()
1238 queue_work(xe->destroy_wq, &q->guc->fini_async); in guc_exec_queue_fini_async()
1241 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) in __guc_exec_queue_fini() argument
1256 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_cleanup() local
1257 struct xe_device *xe = guc_to_xe(guc); in __guc_exec_queue_process_msg_cleanup()
1263 disable_scheduling_deregister(guc, q); in __guc_exec_queue_process_msg_cleanup()
1265 __guc_exec_queue_fini(guc, q); in __guc_exec_queue_process_msg_cleanup()
1276 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_set_sched_props() local
1279 init_policies(guc, q); in __guc_exec_queue_process_msg_set_sched_props()
1285 if (!q->guc->suspend_pending) in __suspend_fence_signal()
1288 WRITE_ONCE(q->guc->suspend_pending, false); in __suspend_fence_signal()
1289 wake_up(&q->guc->suspend_wait); in __suspend_fence_signal()
1294 struct xe_guc *guc = exec_queue_to_guc(q); in suspend_fence_signal() local
1295 struct xe_device *xe = guc_to_xe(guc); in suspend_fence_signal()
1298 guc_read_stopped(guc)); in suspend_fence_signal()
1299 xe_assert(xe, q->guc->suspend_pending); in suspend_fence_signal()
1307 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_suspend() local
1311 wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING || in __guc_exec_queue_process_msg_suspend()
1312 guc_read_stopped(guc)); in __guc_exec_queue_process_msg_suspend()
1314 if (!guc_read_stopped(guc)) { in __guc_exec_queue_process_msg_suspend()
1317 q->guc->resume_time); in __guc_exec_queue_process_msg_suspend()
1321 if (wait_ms > 0 && q->guc->resume_time) in __guc_exec_queue_process_msg_suspend()
1327 } else if (q->guc->suspend_pending) { in __guc_exec_queue_process_msg_suspend()
1340 q->guc->resume_time = RESUME_PENDING; in __guc_exec_queue_process_msg_resume()
1394 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_init() local
1395 struct xe_device *xe = guc_to_xe(guc); in guc_exec_queue_init()
1400 xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc))); in guc_exec_queue_init()
1406 q->guc = ge; in guc_exec_queue_init()
1417 timeout, guc_to_gt(guc)->ordered_wq, NULL, in guc_exec_queue_init()
1428 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); in guc_exec_queue_init()
1430 mutex_lock(&guc->submission_state.lock); in guc_exec_queue_init()
1432 err = alloc_guc_id(guc, q); in guc_exec_queue_init()
1438 if (guc_read_stopped(guc)) in guc_exec_queue_init()
1441 mutex_unlock(&guc->submission_state.lock); in guc_exec_queue_init()
1443 xe_exec_queue_assign_name(q, q->guc->id); in guc_exec_queue_init()
1450 mutex_unlock(&guc->submission_state.lock); in guc_exec_queue_init()
1479 xe_sched_add_msg_locked(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1481 xe_sched_add_msg(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1501 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_fini()
1567 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_suspend()
1568 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_suspend()
1575 q->guc->suspend_pending = true; in guc_exec_queue_suspend()
1583 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_suspend_wait() local
1591 ret = wait_event_interruptible_timeout(q->guc->suspend_wait, in guc_exec_queue_suspend_wait()
1592 !READ_ONCE(q->guc->suspend_pending) || in guc_exec_queue_suspend_wait()
1594 guc_read_stopped(guc), in guc_exec_queue_suspend_wait()
1598 xe_gt_warn(guc_to_gt(guc), in guc_exec_queue_suspend_wait()
1600 q->guc->id); in guc_exec_queue_suspend_wait()
1610 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_resume()
1611 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_resume()
1612 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_resume() local
1613 struct xe_device *xe = guc_to_xe(guc); in guc_exec_queue_resume()
1615 xe_assert(xe, !q->guc->suspend_pending); in guc_exec_queue_resume()
1646 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_stop() argument
1648 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop()
1658 __guc_exec_queue_fini(guc, q); in guc_exec_queue_stop()
1660 if (q->guc->suspend_pending) { in guc_exec_queue_stop()
1667 &q->guc->state); in guc_exec_queue_stop()
1668 q->guc->resume_time = 0; in guc_exec_queue_stop()
1699 int xe_guc_submit_reset_prepare(struct xe_guc *guc) in xe_guc_submit_reset_prepare() argument
1710 ret = atomic_fetch_or(1, &guc->submission_state.stopped); in xe_guc_submit_reset_prepare()
1712 wake_up_all(&guc->ct.wq); in xe_guc_submit_reset_prepare()
1717 void xe_guc_submit_reset_wait(struct xe_guc *guc) in xe_guc_submit_reset_wait() argument
1719 wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) || in xe_guc_submit_reset_wait()
1720 !guc_read_stopped(guc)); in xe_guc_submit_reset_wait()
1723 void xe_guc_submit_stop(struct xe_guc *guc) in xe_guc_submit_stop() argument
1727 struct xe_device *xe = guc_to_xe(guc); in xe_guc_submit_stop()
1729 xe_assert(xe, guc_read_stopped(guc) == 1); in xe_guc_submit_stop()
1731 mutex_lock(&guc->submission_state.lock); in xe_guc_submit_stop()
1733 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_stop()
1735 if (q->guc->id != index) in xe_guc_submit_stop()
1738 guc_exec_queue_stop(guc, q); in xe_guc_submit_stop()
1741 mutex_unlock(&guc->submission_state.lock); in xe_guc_submit_stop()
1752 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start()
1767 int xe_guc_submit_start(struct xe_guc *guc) in xe_guc_submit_start() argument
1771 struct xe_device *xe = guc_to_xe(guc); in xe_guc_submit_start()
1773 xe_assert(xe, guc_read_stopped(guc) == 1); in xe_guc_submit_start()
1775 mutex_lock(&guc->submission_state.lock); in xe_guc_submit_start()
1776 atomic_dec(&guc->submission_state.stopped); in xe_guc_submit_start()
1777 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_start()
1779 if (q->guc->id != index) in xe_guc_submit_start()
1784 mutex_unlock(&guc->submission_state.lock); in xe_guc_submit_start()
1786 wake_up_all(&guc->ct.wq); in xe_guc_submit_start()
1792 g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id) in g2h_exec_queue_lookup() argument
1794 struct xe_device *xe = guc_to_xe(guc); in g2h_exec_queue_lookup()
1802 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); in g2h_exec_queue_lookup()
1808 xe_assert(xe, guc_id >= q->guc->id); in g2h_exec_queue_lookup()
1809 xe_assert(xe, guc_id < (q->guc->id + q->width)); in g2h_exec_queue_lookup()
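g2h_exec_queue_lookup() above resolves any GuC ID in [q->guc->id, q->guc->id + q->width) to the same queue, because a parallel queue is stored under each of its width consecutive IDs; iteration paths such as xe_guc_submit_stop()/start() then skip entries whose index is not the base ID so each queue is visited once. A small userspace sketch of that lookup/dedup pattern; toy types, no locking:

```c
#include <stdio.h>

#define NUM_IDS 16

struct demo_queue { int id; int width; };

static struct demo_queue *lookup[NUM_IDS];	/* stands in for the xarray */

static void publish(struct demo_queue *q)
{
	for (int i = 0; i < q->width; i++)
		lookup[q->id + i] = q;
}

static struct demo_queue *g2h_lookup(int guc_id)
{
	struct demo_queue *q = (guc_id >= 0 && guc_id < NUM_IDS) ?
			       lookup[guc_id] : NULL;

	if (!q)
		return NULL;
	/* Mirrors the driver's asserts: the ID must land inside the queue's range. */
	if (guc_id < q->id || guc_id >= q->id + q->width)
		return NULL;
	return q;
}

int main(void)
{
	struct demo_queue q = { .id = 4, .width = 3 };

	publish(&q);

	/* Any ID in [4, 7) resolves to the same queue. */
	printf("lookup(5) -> base id %d\n", g2h_lookup(5)->id);

	/* Visit each queue exactly once, as the stop/start iteration does. */
	for (int index = 0; index < NUM_IDS; index++) {
		struct demo_queue *e = lookup[index];

		if (e && e->id == index)
			printf("queue at base id %d, width %d\n", e->id, e->width);
	}
	return 0;
}
```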
1814 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in deregister_exec_queue() argument
1818 q->guc->id, in deregister_exec_queue()
1821 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q)); in deregister_exec_queue()
1822 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in deregister_exec_queue()
1823 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in deregister_exec_queue()
1824 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in deregister_exec_queue()
1828 xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action)); in deregister_exec_queue()
1831 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, in handle_sched_done() argument
1837 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q)); in handle_sched_done()
1839 q->guc->resume_time = ktime_get(); in handle_sched_done()
1842 wake_up_all(&guc->ct.wq); in handle_sched_done()
1846 xe_gt_assert(guc_to_gt(guc), runnable_state == 0); in handle_sched_done()
1847 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q)); in handle_sched_done()
1850 if (q->guc->suspend_pending) { in handle_sched_done()
1855 wake_up_all(&guc->ct.wq); in handle_sched_done()
1858 deregister_exec_queue(guc, q); in handle_sched_done()
1863 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len) in xe_guc_sched_done_handler() argument
1865 struct xe_device *xe = guc_to_xe(guc); in xe_guc_sched_done_handler()
1875 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_sched_done_handler()
1881 xe_gt_err(guc_to_gt(guc), in xe_guc_sched_done_handler()
1883 atomic_read(&q->guc->state), q->guc->id, in xe_guc_sched_done_handler()
1888 handle_sched_done(guc, q, runnable_state); in xe_guc_sched_done_handler()
1893 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q) in handle_deregister_done() argument
1902 __guc_exec_queue_fini(guc, q); in handle_deregister_done()
1905 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len) in xe_guc_deregister_done_handler() argument
1907 struct xe_device *xe = guc_to_xe(guc); in xe_guc_deregister_done_handler()
1916 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_deregister_done_handler()
1922 xe_gt_err(guc_to_gt(guc), in xe_guc_deregister_done_handler()
1924 atomic_read(&q->guc->state), q->guc->id); in xe_guc_deregister_done_handler()
1928 handle_deregister_done(guc, q); in xe_guc_deregister_done_handler()
1933 int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len) in xe_guc_exec_queue_reset_handler() argument
1935 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_exec_queue_reset_handler()
1936 struct xe_device *xe = guc_to_xe(guc); in xe_guc_exec_queue_reset_handler()
1945 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_reset_handler()
1969 int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, in xe_guc_exec_queue_memory_cat_error_handler() argument
1972 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_exec_queue_memory_cat_error_handler()
1973 struct xe_device *xe = guc_to_xe(guc); in xe_guc_exec_queue_memory_cat_error_handler()
1982 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
1999 int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len) in xe_guc_exec_queue_reset_failure_handler() argument
2001 struct xe_device *xe = guc_to_xe(guc); in xe_guc_exec_queue_reset_failure_handler()
2018 xe_gt_reset_async(guc_to_gt(guc)); in xe_guc_exec_queue_reset_failure_handler()
2027 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_wq_snapshot_capture() local
2028 struct xe_device *xe = guc_to_xe(guc); in guc_exec_queue_wq_snapshot_capture()
2032 snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
2033 snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
2056 snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head); in guc_exec_queue_wq_snapshot_print()
2058 snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail); in guc_exec_queue_wq_snapshot_print()
2084 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_exec_queue_snapshot_capture()
2093 snapshot->guc.id = q->guc->id; in xe_guc_exec_queue_snapshot_capture()
2115 snapshot->schedule_state = atomic_read(&q->guc->state); in xe_guc_exec_queue_snapshot_capture()
2183 drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id); in xe_guc_exec_queue_snapshot_print()
2252 void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p) in xe_guc_submit_print() argument
2257 if (!xe_device_uc_enabled(guc_to_xe(guc))) in xe_guc_submit_print()
2260 mutex_lock(&guc->submission_state.lock); in xe_guc_submit_print()
2261 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_print()
2263 mutex_unlock(&guc->submission_state.lock); in xe_guc_submit_print()