Lines Matching +full:1 +full:q
(identifier search hits; the lines below are from the Linux kernel's xe GuC submission backend, drivers/gpu/drm/xe/xe_guc_submit.c, each shown with its source line number and enclosing function)

47 exec_queue_to_guc(struct xe_exec_queue *q)  in exec_queue_to_guc()  argument
49 return &q->gt->uc.guc; in exec_queue_to_guc()
57 #define EXEC_QUEUE_STATE_REGISTERED (1 << 0)
58 #define EXEC_QUEUE_STATE_ENABLED (1 << 1)
59 #define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
60 #define EXEC_QUEUE_STATE_PENDING_DISABLE (1 << 3)
61 #define EXEC_QUEUE_STATE_DESTROYED (1 << 4)
62 #define EXEC_QUEUE_STATE_SUSPENDED (1 << 5)
63 #define EXEC_QUEUE_STATE_RESET (1 << 6)
64 #define EXEC_QUEUE_STATE_KILLED (1 << 7)
65 #define EXEC_QUEUE_STATE_WEDGED (1 << 8)
66 #define EXEC_QUEUE_STATE_BANNED (1 << 9)
67 #define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
68 #define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
70 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
72 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
75 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
77 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
80 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
82 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
85 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
87 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
90 static void set_exec_queue_enabled(struct xe_exec_queue *q) in set_exec_queue_enabled() argument
92 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
95 static void clear_exec_queue_enabled(struct xe_exec_queue *q) in clear_exec_queue_enabled() argument
97 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
100 static bool exec_queue_pending_enable(struct xe_exec_queue *q) in exec_queue_pending_enable() argument
102 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
105 static void set_exec_queue_pending_enable(struct xe_exec_queue *q) in set_exec_queue_pending_enable() argument
107 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
110 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q) in clear_exec_queue_pending_enable() argument
112 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
115 static bool exec_queue_pending_disable(struct xe_exec_queue *q) in exec_queue_pending_disable() argument
117 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE; in exec_queue_pending_disable()
120 static void set_exec_queue_pending_disable(struct xe_exec_queue *q) in set_exec_queue_pending_disable() argument
122 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in set_exec_queue_pending_disable()
125 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q) in clear_exec_queue_pending_disable() argument
127 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in clear_exec_queue_pending_disable()
130 static bool exec_queue_destroyed(struct xe_exec_queue *q) in exec_queue_destroyed() argument
132 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED; in exec_queue_destroyed()
135 static void set_exec_queue_destroyed(struct xe_exec_queue *q) in set_exec_queue_destroyed() argument
137 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in set_exec_queue_destroyed()
140 static bool exec_queue_banned(struct xe_exec_queue *q) in exec_queue_banned() argument
142 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED; in exec_queue_banned()
145 static void set_exec_queue_banned(struct xe_exec_queue *q) in set_exec_queue_banned() argument
147 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state); in set_exec_queue_banned()
150 static bool exec_queue_suspended(struct xe_exec_queue *q) in exec_queue_suspended() argument
152 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED; in exec_queue_suspended()
155 static void set_exec_queue_suspended(struct xe_exec_queue *q) in set_exec_queue_suspended() argument
157 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in set_exec_queue_suspended()
160 static void clear_exec_queue_suspended(struct xe_exec_queue *q) in clear_exec_queue_suspended() argument
162 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in clear_exec_queue_suspended()
165 static bool exec_queue_reset(struct xe_exec_queue *q) in exec_queue_reset() argument
167 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET; in exec_queue_reset()
170 static void set_exec_queue_reset(struct xe_exec_queue *q) in set_exec_queue_reset() argument
172 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state); in set_exec_queue_reset()
175 static bool exec_queue_killed(struct xe_exec_queue *q) in exec_queue_killed() argument
177 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED; in exec_queue_killed()
180 static void set_exec_queue_killed(struct xe_exec_queue *q) in set_exec_queue_killed() argument
182 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state); in set_exec_queue_killed()
185 static bool exec_queue_wedged(struct xe_exec_queue *q) in exec_queue_wedged() argument
187 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED; in exec_queue_wedged()
190 static void set_exec_queue_wedged(struct xe_exec_queue *q) in set_exec_queue_wedged() argument
192 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state); in set_exec_queue_wedged()
195 static bool exec_queue_check_timeout(struct xe_exec_queue *q) in exec_queue_check_timeout() argument
197 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT; in exec_queue_check_timeout()
200 static void set_exec_queue_check_timeout(struct xe_exec_queue *q) in set_exec_queue_check_timeout() argument
202 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in set_exec_queue_check_timeout()
205 static void clear_exec_queue_check_timeout(struct xe_exec_queue *q) in clear_exec_queue_check_timeout() argument
207 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in clear_exec_queue_check_timeout()
210 static bool exec_queue_extra_ref(struct xe_exec_queue *q) in exec_queue_extra_ref() argument
212 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF; in exec_queue_extra_ref()
215 static void set_exec_queue_extra_ref(struct xe_exec_queue *q) in set_exec_queue_extra_ref() argument
217 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); in set_exec_queue_extra_ref()
220 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) in exec_queue_killed_or_banned_or_wedged() argument
222 return (atomic_read(&q->guc->state) & in exec_queue_killed_or_banned_or_wedged()
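The block of hits above (source lines 57-222) is the exec-queue state machinery: every lifecycle condition is one bit in a single atomic word, read with atomic_read() and flipped with atomic_or()/atomic_and(~bit). A minimal userspace sketch of the same pattern using C11 atomics; the names and the main() driver are illustrative, not taken from the driver:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative state bits, mirroring the EXEC_QUEUE_STATE_* layout above. */
#define STATE_REGISTERED (1 << 0)
#define STATE_ENABLED    (1 << 1)
#define STATE_BANNED     (1 << 9)

struct queue {
	atomic_uint state;	/* every lifecycle flag lives in this one word */
};

static bool queue_registered(struct queue *q)
{
	return atomic_load(&q->state) & STATE_REGISTERED;
}

static void set_queue_registered(struct queue *q)
{
	atomic_fetch_or(&q->state, STATE_REGISTERED);	/* like atomic_or() */
}

static void clear_queue_registered(struct queue *q)
{
	atomic_fetch_and(&q->state, ~STATE_REGISTERED);	/* like atomic_and(~bit) */
}

int main(void)
{
	struct queue q = { .state = 0 };

	set_queue_registered(&q);
	printf("registered: %d\n", queue_registered(&q));	/* prints 1 */
	clear_queue_registered(&q);
	printf("registered: %d\n", queue_registered(&q));	/* prints 0 */
	return 0;
}

Single-bit transitions need no lock this way; invariants spanning several bits (a queue cannot be pending-enable while destroyed, for instance) are enforced by asserts in the callers, as the enable_scheduling()/disable_scheduling() hits further down show.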
237 struct xe_exec_queue *q; in guc_submit_wedged_fini() local
241 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in guc_submit_wedged_fini()
242 if (exec_queue_wedged(q)) { in guc_submit_wedged_fini()
244 xe_exec_queue_put(q); in guc_submit_wedged_fini()
304 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) in __release_guc_id() argument
311 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); in __release_guc_id()
314 q->guc->id, q->width); in __release_guc_id()
320 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in alloc_guc_id() argument
335 q->width); in alloc_guc_id()
339 q->guc->id = ret; in alloc_guc_id()
341 for (i = 0; i < q->width; ++i) { in alloc_guc_id()
343 q->guc->id + i, q, GFP_NOWAIT)); in alloc_guc_id()
351 __release_guc_id(guc, q, i); in alloc_guc_id()
356 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in release_guc_id() argument
359 __release_guc_id(guc, q, q->width); in release_guc_id()
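alloc_guc_id() and __release_guc_id() (source lines 304-359) reserve q->width consecutive GuC IDs and store the queue pointer at each of them in the submission_state.exec_queue_lookup xarray, unwinding the first i entries if an insertion fails part-way. A simplified sketch of that reserve-and-unwind pattern over a plain array instead of an xarray; MAX_IDS and the fixed base in main() are illustrative:

#include <stddef.h>
#include <stdio.h>

#define MAX_IDS 64

struct queue {
	unsigned int id;	/* first id of the reserved range */
	unsigned int width;	/* number of consecutive ids */
};

static struct queue *lookup[MAX_IDS];	/* id -> queue back-pointer */

static void release_id_range(struct queue *q, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++)
		lookup[q->id + i] = NULL;
}

/* Reserve 'width' consecutive slots; undo the partial work on failure. */
static int alloc_id_range(struct queue *q, unsigned int base)
{
	unsigned int i;

	if (base + q->width > MAX_IDS)
		return -1;

	q->id = base;
	for (i = 0; i < q->width; i++) {
		if (lookup[base + i]) {		/* slot already taken */
			release_id_range(q, i);	/* unwind what we stored */
			return -1;
		}
		lookup[base + i] = q;
	}
	return 0;
}

int main(void)
{
	struct queue q = { .width = 3 };

	if (!alloc_id_range(&q, 8))
		printf("queue owns ids %u..%u\n", q.id, q.id + q.width - 1);
	release_id_range(&q, q.width);
	return 0;
}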
394 FIELD_PREP(GUC_KLV_0_LEN, 1); \
411 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) in init_policies() argument
415 enum xe_exec_queue_priority prio = q->sched_props.priority; in init_policies()
416 u32 timeslice_us = q->sched_props.timeslice_us; in init_policies()
417 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; in init_policies()
419 xe_assert(xe, exec_queue_registered(q)); in init_policies()
421 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in init_policies()
430 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) in set_min_preemption_timeout() argument
434 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in set_min_preemption_timeout()
435 __guc_exec_queue_policy_add_preemption_timeout(&policy, 1); in set_min_preemption_timeout()
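init_policies() and set_min_preemption_timeout() (source lines 394-435) build a scheduling-policy H2G message as a series of KLVs: a 32-bit header carrying the key and a dword length (the FIELD_PREP(GUC_KLV_0_LEN, 1) hit at source line 394), followed by the value dword. A rough sketch of that key/length/value packing; the bit layout and key number below are assumed for illustration, not copied from the GuC ABI headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed KLV header layout: key in bits 31:16, length in dwords in 15:0. */
#define KLV_KEY_SHIFT 16
#define KLV_LEN_MASK  0xffffu

struct policy_buf {
	uint32_t dw[16];
	unsigned int count;
};

/* Append one key/length/value triple, FIELD_PREP-style. */
static void klv_add_u32(struct policy_buf *p, uint16_t key, uint32_t value)
{
	p->dw[p->count++] = ((uint32_t)key << KLV_KEY_SHIFT) | (1 & KLV_LEN_MASK);
	p->dw[p->count++] = value;
}

int main(void)
{
	struct policy_buf p = { .count = 0 };

	/* hypothetical key for a preemption timeout of 1 us, as in
	 * set_min_preemption_timeout() above */
	klv_add_u32(&p, 0x1001, 1);
	printf("klv[0]=0x%08x klv[1]=%u\n", (unsigned)p.dw[0], (unsigned)p.dw[1]);
	return 0;
}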
449 struct xe_exec_queue *q, in __register_mlrc_exec_queue() argument
458 xe_assert(xe, xe_exec_queue_is_parallel(q)); in __register_mlrc_exec_queue()
470 action[len++] = q->width; in __register_mlrc_exec_queue()
474 for (i = 1; i < q->width; ++i) { in __register_mlrc_exec_queue()
475 struct xe_lrc *lrc = q->lrc[i]; in __register_mlrc_exec_queue()
508 static void register_exec_queue(struct xe_exec_queue *q) in register_exec_queue() argument
510 struct xe_guc *guc = exec_queue_to_guc(q); in register_exec_queue()
512 struct xe_lrc *lrc = q->lrc[0]; in register_exec_queue()
515 xe_assert(xe, !exec_queue_registered(q)); in register_exec_queue()
518 info.context_idx = q->guc->id; in register_exec_queue()
519 info.engine_class = xe_engine_class_to_guc_class(q->class); in register_exec_queue()
520 info.engine_submit_mask = q->logical_mask; in register_exec_queue()
525 if (xe_exec_queue_is_parallel(q)) { in register_exec_queue()
539 q->guc->wqi_head = 0; in register_exec_queue()
540 q->guc->wqi_tail = 0; in register_exec_queue()
550 if (xe_exec_queue_is_lr(q)) in register_exec_queue()
551 xe_exec_queue_get(q); in register_exec_queue()
553 set_exec_queue_registered(q); in register_exec_queue()
554 trace_xe_exec_queue_register(q); in register_exec_queue()
555 if (xe_exec_queue_is_parallel(q)) in register_exec_queue()
556 __register_mlrc_exec_queue(guc, q, &info); in register_exec_queue()
559 init_policies(guc, q); in register_exec_queue()
562 static u32 wq_space_until_wrap(struct xe_exec_queue *q) in wq_space_until_wrap() argument
564 return (WQ_SIZE - q->guc->wqi_tail); in wq_space_until_wrap()
567 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) in wq_wait_for_space() argument
569 struct xe_guc *guc = exec_queue_to_guc(q); in wq_wait_for_space()
571 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_wait_for_space()
572 unsigned int sleep_period_ms = 1; in wq_wait_for_space()
575 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
578 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); in wq_wait_for_space()
581 xe_gt_reset_async(q->gt); in wq_wait_for_space()
586 sleep_period_ms <<= 1; in wq_wait_for_space()
595 static int wq_noop_append(struct xe_exec_queue *q) in wq_noop_append() argument
597 struct xe_guc *guc = exec_queue_to_guc(q); in wq_noop_append()
599 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_noop_append()
600 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1; in wq_noop_append()
602 if (wq_wait_for_space(q, wq_space_until_wrap(q))) in wq_noop_append()
607 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], in wq_noop_append()
610 q->guc->wqi_tail = 0; in wq_noop_append()
615 static void wq_item_append(struct xe_exec_queue *q) in wq_item_append() argument
617 struct xe_guc *guc = exec_queue_to_guc(q); in wq_item_append()
619 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
620 #define WQ_HEADER_SIZE 4 /* Includes 1 LRC address too */ in wq_item_append()
621 u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)]; in wq_item_append()
622 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32); in wq_item_append()
623 u32 len_dw = (wqi_size / sizeof(u32)) - 1; in wq_item_append()
626 if (wqi_size > wq_space_until_wrap(q)) { in wq_item_append()
627 if (wq_noop_append(q)) in wq_item_append()
630 if (wq_wait_for_space(q, wqi_size)) in wq_item_append()
635 wqi[i++] = xe_lrc_descriptor(q->lrc[0]); in wq_item_append()
636 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | in wq_item_append()
637 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64)); in wq_item_append()
639 for (j = 1; j < q->width; ++j) { in wq_item_append()
640 struct xe_lrc *lrc = q->lrc[j]; in wq_item_append()
648 wq[q->guc->wqi_tail / sizeof(u32)])); in wq_item_append()
650 q->guc->wqi_tail += wqi_size; in wq_item_append()
651 xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE); in wq_item_append()
655 map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
656 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); in wq_item_append()
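wq_space_until_wrap(), wq_wait_for_space(), wq_noop_append() and wq_item_append() (source lines 562-656) manage the parallel-submission work queue as a byte ring: free space is CIRC_SPACE(tail, head, WQ_SIZE), and an item that would straddle the end of the ring is preceded by a padding NOOP so the tail wraps back to zero first. A compact sketch of that accounting; WQ_SIZE, the offsets in main() and the omitted payload writes are illustrative:

#include <stdint.h>
#include <stdio.h>

#define WQ_SIZE 128	/* ring size in bytes, power of two (illustrative) */

struct wq {
	uint32_t head;	/* consumer offset, owned by the firmware side */
	uint32_t tail;	/* producer offset, owned by the driver side */
};

/* Free bytes between tail and head, leaving one byte empty (CIRC_SPACE). */
static uint32_t wq_space(const struct wq *wq)
{
	return (wq->head - wq->tail - 1) & (WQ_SIZE - 1);
}

/* Bytes left before the tail would wrap back to offset 0. */
static uint32_t wq_space_until_wrap(const struct wq *wq)
{
	return WQ_SIZE - wq->tail;
}

/* Append an item of 'size' bytes, padding with a noop if it would wrap. */
static int wq_append(struct wq *wq, uint32_t size)
{
	if (size > wq_space_until_wrap(wq)) {
		uint32_t pad = wq_space_until_wrap(wq);

		if (wq_space(wq) < pad)
			return -1;	/* the driver waits/retries here */
		/* a real noop item would be written at wq->tail here */
		wq->tail = 0;
	}
	if (wq_space(wq) < size)
		return -1;
	/* the item payload would be written at wq->tail here */
	wq->tail += size;
	return 0;
}

int main(void)
{
	struct wq wq = { .head = 64, .tail = 112 };

	/* a 24-byte item does not fit in the 16 bytes before the wrap:
	 * pad to the end, wrap the tail to 0, then append */
	printf("append: %d, tail now %u\n", wq_append(&wq, 24), wq.tail);
	return 0;
}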
660 static void submit_exec_queue(struct xe_exec_queue *q) in submit_exec_queue() argument
662 struct xe_guc *guc = exec_queue_to_guc(q); in submit_exec_queue()
664 struct xe_lrc *lrc = q->lrc[0]; in submit_exec_queue()
671 xe_assert(xe, exec_queue_registered(q)); in submit_exec_queue()
673 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
674 wq_item_append(q); in submit_exec_queue()
678 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q)) in submit_exec_queue()
681 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) { in submit_exec_queue()
683 action[len++] = q->guc->id; in submit_exec_queue()
686 num_g2h = 1; in submit_exec_queue()
687 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
690 q->guc->resume_time = RESUME_PENDING; in submit_exec_queue()
691 set_exec_queue_pending_enable(q); in submit_exec_queue()
692 set_exec_queue_enabled(q); in submit_exec_queue()
693 trace_xe_exec_queue_scheduling_enable(q); in submit_exec_queue()
696 action[len++] = q->guc->id; in submit_exec_queue()
697 trace_xe_exec_queue_submit(q); in submit_exec_queue()
705 action[len++] = q->guc->id; in submit_exec_queue()
706 trace_xe_exec_queue_submit(q); in submit_exec_queue()
716 struct xe_exec_queue *q = job->q; in guc_exec_queue_run_job() local
717 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_run_job()
719 bool lr = xe_exec_queue_is_lr(q); in guc_exec_queue_run_job()
721 xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || in guc_exec_queue_run_job()
722 exec_queue_banned(q) || exec_queue_suspended(q)); in guc_exec_queue_run_job()
726 if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) { in guc_exec_queue_run_job()
727 if (!exec_queue_registered(q)) in guc_exec_queue_run_job()
728 register_exec_queue(q); in guc_exec_queue_run_job()
730 q->ring_ops->emit_job(job); in guc_exec_queue_run_job()
731 submit_exec_queue(q); in guc_exec_queue_run_job()
757 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \ argument
760 q->guc->id, \
765 struct xe_exec_queue *q) in disable_scheduling_deregister() argument
767 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling_deregister()
771 set_min_preemption_timeout(guc, q); in disable_scheduling_deregister()
773 ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || in disable_scheduling_deregister()
776 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister()
780 xe_gt_reset_async(q->gt); in disable_scheduling_deregister()
785 clear_exec_queue_enabled(q); in disable_scheduling_deregister()
786 set_exec_queue_pending_disable(q); in disable_scheduling_deregister()
787 set_exec_queue_destroyed(q); in disable_scheduling_deregister()
788 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling_deregister()
799 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q) in xe_guc_exec_queue_trigger_cleanup() argument
801 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_trigger_cleanup()
807 if (xe_exec_queue_is_lr(q)) in xe_guc_exec_queue_trigger_cleanup()
808 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr); in xe_guc_exec_queue_trigger_cleanup()
810 xe_sched_tdr_queue_imm(&q->guc->sched); in xe_guc_exec_queue_trigger_cleanup()
823 struct xe_exec_queue *q; in xe_guc_submit_wedge() local
837 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_wedge()
838 if (xe_exec_queue_get_unless_zero(q)) in xe_guc_submit_wedge()
839 set_exec_queue_wedged(q); in xe_guc_submit_wedge()
862 struct xe_exec_queue *q = ge->q; in xe_guc_exec_queue_lr_cleanup() local
863 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
868 xe_assert(xe, xe_exec_queue_is_lr(q)); in xe_guc_exec_queue_lr_cleanup()
869 trace_xe_exec_queue_lr_cleanup(q); in xe_guc_exec_queue_lr_cleanup()
871 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in xe_guc_exec_queue_lr_cleanup()
887 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in xe_guc_exec_queue_lr_cleanup()
888 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
891 set_exec_queue_banned(q); in xe_guc_exec_queue_lr_cleanup()
892 disable_scheduling_deregister(guc, q); in xe_guc_exec_queue_lr_cleanup()
899 !exec_queue_pending_disable(q) || in xe_guc_exec_queue_lr_cleanup()
904 xe_gt_reset_async(q->gt); in xe_guc_exec_queue_lr_cleanup()
914 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) in check_timeout() argument
916 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); in check_timeout()
918 u32 timeout_ms = q->sched_props.job_timeout_ms; in check_timeout()
925 q->guc->id); in check_timeout()
930 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); in check_timeout()
931 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); in check_timeout()
953 q->guc->id, running_time_ms, timeout_ms, diff); in check_timeout()
958 static void enable_scheduling(struct xe_exec_queue *q) in enable_scheduling() argument
960 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE); in enable_scheduling()
961 struct xe_guc *guc = exec_queue_to_guc(q); in enable_scheduling()
964 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in enable_scheduling()
965 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in enable_scheduling()
966 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in enable_scheduling()
967 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in enable_scheduling()
969 set_exec_queue_pending_enable(q); in enable_scheduling()
970 set_exec_queue_enabled(q); in enable_scheduling()
971 trace_xe_exec_queue_scheduling_enable(q); in enable_scheduling()
974 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1); in enable_scheduling()
977 !exec_queue_pending_enable(q) || in enable_scheduling()
981 set_exec_queue_banned(q); in enable_scheduling()
982 xe_gt_reset_async(q->gt); in enable_scheduling()
983 xe_sched_tdr_queue_imm(&q->guc->sched); in enable_scheduling()
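enable_scheduling() (source lines 958-983) sets ENABLED plus PENDING_ENABLE, sends the SCHED_CONTEXT_MODE_SET H2G, then waits on guc->ct.wq for the G2H handler (handle_sched_done(), further down) to clear the pending bit, banning the queue and kicking a GT reset if the wait times out. A rough userspace sketch of that request/acknowledge handshake, with a pthread standing in for the CT response path and a polling loop standing in for wait_event_timeout(); all names and timings are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define STATE_ENABLED        (1 << 1)
#define STATE_PENDING_ENABLE (1 << 2)
#define STATE_BANNED         (1 << 9)

static atomic_uint state;

/* "G2H handler": the firmware side acknowledges the enable request. */
static void *g2h_sched_done(void *arg)
{
	(void)arg;
	usleep(10 * 1000);			/* response latency */
	atomic_fetch_and(&state, ~STATE_PENDING_ENABLE);
	return NULL;
}

static bool enable_scheduling(void)
{
	pthread_t fw;

	atomic_fetch_or(&state, STATE_PENDING_ENABLE | STATE_ENABLED);
	pthread_create(&fw, NULL, g2h_sched_done, NULL);	/* "send H2G" */

	/* bounded wait for the pending bit to clear (polled in this sketch) */
	for (int i = 0; i < 100; i++) {
		if (!(atomic_load(&state) & STATE_PENDING_ENABLE)) {
			pthread_join(fw, NULL);
			return true;
		}
		usleep(1000);
	}
	atomic_fetch_or(&state, STATE_BANNED);	/* timeout: ban the queue */
	pthread_join(fw, NULL);
	return false;
}

int main(void)
{
	printf("enable_scheduling: %s\n", enable_scheduling() ? "ok" : "timed out");
	return 0;
}

The same handshake runs in the other direction for disable_scheduling() and PENDING_DISABLE, and handle_sched_done() below is the side that clears the pending bit when the G2H arrives.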
987 static void disable_scheduling(struct xe_exec_queue *q, bool immediate) in disable_scheduling() argument
989 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling()
990 struct xe_guc *guc = exec_queue_to_guc(q); in disable_scheduling()
992 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in disable_scheduling()
993 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in disable_scheduling()
994 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in disable_scheduling()
997 set_min_preemption_timeout(guc, q); in disable_scheduling()
998 clear_exec_queue_enabled(q); in disable_scheduling()
999 set_exec_queue_pending_disable(q); in disable_scheduling()
1000 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling()
1003 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1); in disable_scheduling()
1006 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in __deregister_exec_queue() argument
1010 q->guc->id, in __deregister_exec_queue()
1013 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in __deregister_exec_queue()
1014 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in __deregister_exec_queue()
1015 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in __deregister_exec_queue()
1016 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in __deregister_exec_queue()
1018 set_exec_queue_destroyed(q); in __deregister_exec_queue()
1019 trace_xe_exec_queue_deregister(q); in __deregister_exec_queue()
1022 G2H_LEN_DW_DEREGISTER_CONTEXT, 1); in __deregister_exec_queue()
1030 struct xe_exec_queue *q = job->q; in guc_exec_queue_timedout_job() local
1031 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job()
1032 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_timedout_job()
1035 pid_t pid = -1; in guc_exec_queue_timedout_job()
1056 skip_timeout_check = exec_queue_reset(q) || in guc_exec_queue_timedout_job()
1057 exec_queue_killed_or_banned_or_wedged(q) || in guc_exec_queue_timedout_job()
1058 exec_queue_destroyed(q); in guc_exec_queue_timedout_job()
1067 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in guc_exec_queue_timedout_job()
1070 if (!wedged && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1073 if (exec_queue_reset(q)) in guc_exec_queue_timedout_job()
1076 if (!exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1082 !exec_queue_pending_enable(q) || in guc_exec_queue_timedout_job()
1093 set_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1094 disable_scheduling(q, skip_timeout_check); in guc_exec_queue_timedout_job()
1107 !exec_queue_pending_disable(q) || in guc_exec_queue_timedout_job()
1113 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1114 xe_exec_queue_get(q); /* GT reset owns this */ in guc_exec_queue_timedout_job()
1115 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1116 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1125 if (!wedged && !skip_timeout_check && !check_timeout(q, job) && in guc_exec_queue_timedout_job()
1126 !exec_queue_reset(q) && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1127 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1131 if (q->vm && q->vm->xef) { in guc_exec_queue_timedout_job()
1132 process_name = q->vm->xef->process_name; in guc_exec_queue_timedout_job()
1133 pid = q->vm->xef->pid; in guc_exec_queue_timedout_job()
1137 q->guc->id, q->flags, process_name, pid); in guc_exec_queue_timedout_job()
1141 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1148 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL, in guc_exec_queue_timedout_job()
1150 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q), in guc_exec_queue_timedout_job()
1152 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL || in guc_exec_queue_timedout_job()
1153 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) { in guc_exec_queue_timedout_job()
1155 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1156 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1162 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1163 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1164 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1165 xe_exec_queue_get(q); in guc_exec_queue_timedout_job()
1166 __deregister_exec_queue(guc, q); in guc_exec_queue_timedout_job()
1170 xe_hw_fence_irq_stop(q->fence_irq); in guc_exec_queue_timedout_job()
1179 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_timedout_job()
1188 xe_hw_fence_irq_start(q->fence_irq); in guc_exec_queue_timedout_job()
1193 enable_scheduling(q); in guc_exec_queue_timedout_job()
1210 struct xe_exec_queue *q = ge->q; in __guc_exec_queue_fini_async() local
1211 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_fini_async()
1214 trace_xe_exec_queue_destroy(q); in __guc_exec_queue_fini_async()
1216 if (xe_exec_queue_is_lr(q)) in __guc_exec_queue_fini_async()
1218 release_guc_id(guc, q); in __guc_exec_queue_fini_async()
1223 xe_exec_queue_fini(q); in __guc_exec_queue_fini_async()
1227 static void guc_exec_queue_fini_async(struct xe_exec_queue *q) in guc_exec_queue_fini_async() argument
1229 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_fini_async()
1232 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); in guc_exec_queue_fini_async()
1235 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) in guc_exec_queue_fini_async()
1236 __guc_exec_queue_fini_async(&q->guc->fini_async); in guc_exec_queue_fini_async()
1238 queue_work(xe->destroy_wq, &q->guc->fini_async); in guc_exec_queue_fini_async()
1241 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) in __guc_exec_queue_fini() argument
1250 guc_exec_queue_fini_async(q); in __guc_exec_queue_fini()
1255 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_cleanup() local
1256 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_cleanup()
1259 xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); in __guc_exec_queue_process_msg_cleanup()
1260 trace_xe_exec_queue_cleanup_entity(q); in __guc_exec_queue_process_msg_cleanup()
1262 if (exec_queue_registered(q)) in __guc_exec_queue_process_msg_cleanup()
1263 disable_scheduling_deregister(guc, q); in __guc_exec_queue_process_msg_cleanup()
1265 __guc_exec_queue_fini(guc, q); in __guc_exec_queue_process_msg_cleanup()
1268 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) in guc_exec_queue_allowed_to_change_state() argument
1270 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q); in guc_exec_queue_allowed_to_change_state()
1275 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_set_sched_props() local
1276 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_set_sched_props()
1278 if (guc_exec_queue_allowed_to_change_state(q)) in __guc_exec_queue_process_msg_set_sched_props()
1279 init_policies(guc, q); in __guc_exec_queue_process_msg_set_sched_props()
1283 static void __suspend_fence_signal(struct xe_exec_queue *q) in __suspend_fence_signal() argument
1285 if (!q->guc->suspend_pending) in __suspend_fence_signal()
1288 WRITE_ONCE(q->guc->suspend_pending, false); in __suspend_fence_signal()
1289 wake_up(&q->guc->suspend_wait); in __suspend_fence_signal()
1292 static void suspend_fence_signal(struct xe_exec_queue *q) in suspend_fence_signal() argument
1294 struct xe_guc *guc = exec_queue_to_guc(q); in suspend_fence_signal()
1297 xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) || in suspend_fence_signal()
1299 xe_assert(xe, q->guc->suspend_pending); in suspend_fence_signal()
1301 __suspend_fence_signal(q); in suspend_fence_signal()
1306 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_suspend() local
1307 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_suspend()
1309 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && in __guc_exec_queue_process_msg_suspend()
1310 exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_suspend()
1311 wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING || in __guc_exec_queue_process_msg_suspend()
1317 q->guc->resume_time); in __guc_exec_queue_process_msg_suspend()
1318 s64 wait_ms = q->vm->preempt.min_run_period_ms - in __guc_exec_queue_process_msg_suspend()
1321 if (wait_ms > 0 && q->guc->resume_time) in __guc_exec_queue_process_msg_suspend()
1324 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1325 disable_scheduling(q, false); in __guc_exec_queue_process_msg_suspend()
1327 } else if (q->guc->suspend_pending) { in __guc_exec_queue_process_msg_suspend()
1328 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1329 suspend_fence_signal(q); in __guc_exec_queue_process_msg_suspend()
1335 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_resume() local
1337 if (guc_exec_queue_allowed_to_change_state(q)) { in __guc_exec_queue_process_msg_resume()
1338 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1339 if (!exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_resume()
1340 q->guc->resume_time = RESUME_PENDING; in __guc_exec_queue_process_msg_resume()
1341 enable_scheduling(q); in __guc_exec_queue_process_msg_resume()
1344 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1348 #define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
1391 static int guc_exec_queue_init(struct xe_exec_queue *q) in guc_exec_queue_init() argument
1394 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_init()
1406 q->guc = ge; in guc_exec_queue_init()
1407 ge->q = q; in guc_exec_queue_init()
1413 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : in guc_exec_queue_init()
1414 msecs_to_jiffies(q->sched_props.job_timeout_ms); in guc_exec_queue_init()
1416 NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, in guc_exec_queue_init()
1418 q->name, gt_to_xe(q->gt)->drm.dev); in guc_exec_queue_init()
1427 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_init()
1428 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); in guc_exec_queue_init()
1432 err = alloc_guc_id(guc, q); in guc_exec_queue_init()
1436 q->entity = &ge->entity; in guc_exec_queue_init()
1443 xe_exec_queue_assign_name(q, q->guc->id); in guc_exec_queue_init()
1445 trace_xe_exec_queue_create(q); in guc_exec_queue_init()
1460 static void guc_exec_queue_kill(struct xe_exec_queue *q) in guc_exec_queue_kill() argument
1462 trace_xe_exec_queue_kill(q); in guc_exec_queue_kill()
1463 set_exec_queue_killed(q); in guc_exec_queue_kill()
1464 __suspend_fence_signal(q); in guc_exec_queue_kill()
1465 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_kill()
1468 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, in guc_exec_queue_add_msg() argument
1471 xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q))); in guc_exec_queue_add_msg()
1475 msg->private_data = q; in guc_exec_queue_add_msg()
1479 xe_sched_add_msg_locked(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1481 xe_sched_add_msg(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1484 static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, in guc_exec_queue_try_add_msg() argument
1491 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED); in guc_exec_queue_try_add_msg()
1497 #define STATIC_MSG_SUSPEND 1
1499 static void guc_exec_queue_fini(struct xe_exec_queue *q) in guc_exec_queue_fini() argument
1501 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_fini()
1503 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q)) in guc_exec_queue_fini()
1504 guc_exec_queue_add_msg(q, msg, CLEANUP); in guc_exec_queue_fini()
1506 __guc_exec_queue_fini(exec_queue_to_guc(q), q); in guc_exec_queue_fini()
1509 static int guc_exec_queue_set_priority(struct xe_exec_queue *q, in guc_exec_queue_set_priority() argument
1514 if (q->sched_props.priority == priority || in guc_exec_queue_set_priority()
1515 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_priority()
1522 q->sched_props.priority = priority; in guc_exec_queue_set_priority()
1523 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_priority()
1528 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us) in guc_exec_queue_set_timeslice() argument
1532 if (q->sched_props.timeslice_us == timeslice_us || in guc_exec_queue_set_timeslice()
1533 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_timeslice()
1540 q->sched_props.timeslice_us = timeslice_us; in guc_exec_queue_set_timeslice()
1541 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_timeslice()
1546 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, in guc_exec_queue_set_preempt_timeout() argument
1551 if (q->sched_props.preempt_timeout_us == preempt_timeout_us || in guc_exec_queue_set_preempt_timeout()
1552 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_preempt_timeout()
1559 q->sched_props.preempt_timeout_us = preempt_timeout_us; in guc_exec_queue_set_preempt_timeout()
1560 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_preempt_timeout()
1565 static int guc_exec_queue_suspend(struct xe_exec_queue *q) in guc_exec_queue_suspend() argument
1567 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_suspend()
1568 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_suspend()
1570 if (exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_suspend()
1574 if (guc_exec_queue_try_add_msg(q, msg, SUSPEND)) in guc_exec_queue_suspend()
1575 q->guc->suspend_pending = true; in guc_exec_queue_suspend()
1581 static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) in guc_exec_queue_suspend_wait() argument
1583 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_suspend_wait()
1591 ret = wait_event_interruptible_timeout(q->guc->suspend_wait, in guc_exec_queue_suspend_wait()
1592 !READ_ONCE(q->guc->suspend_pending) || in guc_exec_queue_suspend_wait()
1593 exec_queue_killed(q) || in guc_exec_queue_suspend_wait()
1600 q->guc->id); in guc_exec_queue_suspend_wait()
1608 static void guc_exec_queue_resume(struct xe_exec_queue *q) in guc_exec_queue_resume() argument
1610 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_resume()
1611 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_resume()
1612 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_resume()
1615 xe_assert(xe, !q->guc->suspend_pending); in guc_exec_queue_resume()
1618 guc_exec_queue_try_add_msg(q, msg, RESUME); in guc_exec_queue_resume()
1622 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) in guc_exec_queue_reset_status() argument
1624 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q); in guc_exec_queue_reset_status()
1646 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_stop() argument
1648 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop()
1654 if (exec_queue_registered(q)) { in guc_exec_queue_stop()
1655 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in guc_exec_queue_stop()
1656 xe_exec_queue_put(q); in guc_exec_queue_stop()
1657 else if (exec_queue_destroyed(q)) in guc_exec_queue_stop()
1658 __guc_exec_queue_fini(guc, q); in guc_exec_queue_stop()
1660 if (q->guc->suspend_pending) { in guc_exec_queue_stop()
1661 set_exec_queue_suspended(q); in guc_exec_queue_stop()
1662 suspend_fence_signal(q); in guc_exec_queue_stop()
1667 &q->guc->state); in guc_exec_queue_stop()
1668 q->guc->resume_time = 0; in guc_exec_queue_stop()
1669 trace_xe_exec_queue_stop(q); in guc_exec_queue_stop()
1676 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) { in guc_exec_queue_stop()
1687 } else if (xe_exec_queue_is_lr(q) && in guc_exec_queue_stop()
1688 (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) { in guc_exec_queue_stop()
1693 set_exec_queue_banned(q); in guc_exec_queue_stop()
1694 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_stop()
1710 ret = atomic_fetch_or(1, &guc->submission_state.stopped); in xe_guc_submit_reset_prepare()
1725 struct xe_exec_queue *q; in xe_guc_submit_stop() local
1729 xe_assert(xe, guc_read_stopped(guc) == 1); in xe_guc_submit_stop()
1733 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_stop()
1735 if (q->guc->id != index) in xe_guc_submit_stop()
1738 guc_exec_queue_stop(guc, q); in xe_guc_submit_stop()
1750 static void guc_exec_queue_start(struct xe_exec_queue *q) in guc_exec_queue_start() argument
1752 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start()
1754 if (!exec_queue_killed_or_banned_or_wedged(q)) { in guc_exec_queue_start()
1757 trace_xe_exec_queue_resubmit(q); in guc_exec_queue_start()
1758 for (i = 0; i < q->width; ++i) in guc_exec_queue_start()
1759 xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail); in guc_exec_queue_start()
1769 struct xe_exec_queue *q; in xe_guc_submit_start() local
1773 xe_assert(xe, guc_read_stopped(guc) == 1); in xe_guc_submit_start()
1777 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_start()
1779 if (q->guc->id != index) in xe_guc_submit_start()
1782 guc_exec_queue_start(q); in xe_guc_submit_start()
1795 struct xe_exec_queue *q; in g2h_exec_queue_lookup() local
1802 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); in g2h_exec_queue_lookup()
1803 if (unlikely(!q)) { in g2h_exec_queue_lookup()
1808 xe_assert(xe, guc_id >= q->guc->id); in g2h_exec_queue_lookup()
1809 xe_assert(xe, guc_id < (q->guc->id + q->width)); in g2h_exec_queue_lookup()
1811 return q; in g2h_exec_queue_lookup()
1814 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in deregister_exec_queue() argument
1818 q->guc->id, in deregister_exec_queue()
1821 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q)); in deregister_exec_queue()
1822 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in deregister_exec_queue()
1823 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in deregister_exec_queue()
1824 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in deregister_exec_queue()
1826 trace_xe_exec_queue_deregister(q); in deregister_exec_queue()
1831 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, in handle_sched_done() argument
1834 trace_xe_exec_queue_scheduling_done(q); in handle_sched_done()
1836 if (runnable_state == 1) { in handle_sched_done()
1837 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q)); in handle_sched_done()
1839 q->guc->resume_time = ktime_get(); in handle_sched_done()
1840 clear_exec_queue_pending_enable(q); in handle_sched_done()
1844 bool check_timeout = exec_queue_check_timeout(q); in handle_sched_done()
1847 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q)); in handle_sched_done()
1849 clear_exec_queue_pending_disable(q); in handle_sched_done()
1850 if (q->guc->suspend_pending) { in handle_sched_done()
1851 suspend_fence_signal(q); in handle_sched_done()
1853 if (exec_queue_banned(q) || check_timeout) { in handle_sched_done()
1858 deregister_exec_queue(guc, q); in handle_sched_done()
1866 struct xe_exec_queue *q; in xe_guc_sched_done_handler() local
1868 u32 runnable_state = msg[1]; in xe_guc_sched_done_handler()
1875 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_sched_done_handler()
1876 if (unlikely(!q)) in xe_guc_sched_done_handler()
1879 if (unlikely(!exec_queue_pending_enable(q) && in xe_guc_sched_done_handler()
1880 !exec_queue_pending_disable(q))) { in xe_guc_sched_done_handler()
1883 atomic_read(&q->guc->state), q->guc->id, in xe_guc_sched_done_handler()
1888 handle_sched_done(guc, q, runnable_state); in xe_guc_sched_done_handler()
1893 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q) in handle_deregister_done() argument
1895 trace_xe_exec_queue_deregister_done(q); in handle_deregister_done()
1897 clear_exec_queue_registered(q); in handle_deregister_done()
1899 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in handle_deregister_done()
1900 xe_exec_queue_put(q); in handle_deregister_done()
1902 __guc_exec_queue_fini(guc, q); in handle_deregister_done()
1908 struct xe_exec_queue *q; in xe_guc_deregister_done_handler() local
1911 if (unlikely(len < 1)) { in xe_guc_deregister_done_handler()
1916 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_deregister_done_handler()
1917 if (unlikely(!q)) in xe_guc_deregister_done_handler()
1920 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) || in xe_guc_deregister_done_handler()
1921 exec_queue_pending_enable(q) || exec_queue_enabled(q)) { in xe_guc_deregister_done_handler()
1924 atomic_read(&q->guc->state), q->guc->id); in xe_guc_deregister_done_handler()
1928 handle_deregister_done(guc, q); in xe_guc_deregister_done_handler()
1937 struct xe_exec_queue *q; in xe_guc_exec_queue_reset_handler() local
1940 if (unlikely(len < 1)) { in xe_guc_exec_queue_reset_handler()
1945 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_reset_handler()
1946 if (unlikely(!q)) in xe_guc_exec_queue_reset_handler()
1950 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_reset_handler()
1954 trace_xe_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
1962 set_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
1963 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_reset_handler()
1964 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_reset_handler()
1974 struct xe_exec_queue *q; in xe_guc_exec_queue_memory_cat_error_handler() local
1977 if (unlikely(len < 1)) { in xe_guc_exec_queue_memory_cat_error_handler()
1982 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
1983 if (unlikely(!q)) in xe_guc_exec_queue_memory_cat_error_handler()
1987 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
1989 trace_xe_exec_queue_memory_cat_error(q); in xe_guc_exec_queue_memory_cat_error_handler()
1992 set_exec_queue_reset(q); in xe_guc_exec_queue_memory_cat_error_handler()
1993 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_memory_cat_error_handler()
1994 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_memory_cat_error_handler()
2011 instance = msg[1]; in xe_guc_exec_queue_reset_failure_handler()
2024 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q, in guc_exec_queue_wq_snapshot_capture() argument
2027 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_wq_snapshot_capture()
2029 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in guc_exec_queue_wq_snapshot_capture()
2032 snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
2033 snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
2073 * @q: faulty exec queue
2082 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) in xe_guc_exec_queue_snapshot_capture() argument
2084 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_exec_queue_snapshot_capture()
2093 snapshot->guc.id = q->guc->id; in xe_guc_exec_queue_snapshot_capture()
2094 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name)); in xe_guc_exec_queue_snapshot_capture()
2095 snapshot->class = q->class; in xe_guc_exec_queue_snapshot_capture()
2096 snapshot->logical_mask = q->logical_mask; in xe_guc_exec_queue_snapshot_capture()
2097 snapshot->width = q->width; in xe_guc_exec_queue_snapshot_capture()
2098 snapshot->refcount = kref_read(&q->refcount); in xe_guc_exec_queue_snapshot_capture()
2100 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us; in xe_guc_exec_queue_snapshot_capture()
2102 q->sched_props.preempt_timeout_us; in xe_guc_exec_queue_snapshot_capture()
2104 snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *), in xe_guc_exec_queue_snapshot_capture()
2108 for (i = 0; i < q->width; ++i) { in xe_guc_exec_queue_snapshot_capture()
2109 struct xe_lrc *lrc = q->lrc[i]; in xe_guc_exec_queue_snapshot_capture()
2115 snapshot->schedule_state = atomic_read(&q->guc->state); in xe_guc_exec_queue_snapshot_capture()
2116 snapshot->exec_queue_flags = q->flags; in xe_guc_exec_queue_snapshot_capture()
2118 snapshot->parallel_execution = xe_exec_queue_is_parallel(q); in xe_guc_exec_queue_snapshot_capture()
2120 guc_exec_queue_wq_snapshot_capture(q, snapshot); in xe_guc_exec_queue_snapshot_capture()
2136 dma_fence_is_signaled(job_iter->fence) ? 1 : 0; in xe_guc_exec_queue_snapshot_capture()
2139 ? 1 : 0; in xe_guc_exec_queue_snapshot_capture()
2236 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) in guc_exec_queue_print() argument
2240 snapshot = xe_guc_exec_queue_snapshot_capture(q); in guc_exec_queue_print()
2254 struct xe_exec_queue *q; in xe_guc_submit_print() local
2261 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_print()
2262 guc_exec_queue_print(q, p); in xe_guc_submit_print()