Lines Matching refs:guc
161 #define NUMBER_MULTI_LRC_GUC_ID(guc) \ argument
162 ((guc)->submission_state.num_guc_ids / 16)
451 GEM_BUG_ON(!ce->parallel.guc.parent_page); in __get_parent_scratch_offset()
453 return ce->parallel.guc.parent_page * PAGE_SIZE; in __get_parent_scratch_offset()
504 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) in get_wq_pointer()
506 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head); in get_wq_pointer()
513 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; in get_wq_pointer()
516 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) in __get_context() argument
518 struct intel_context *ce = xa_load(&guc->context_lookup, id); in __get_context()
525 static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index) in __get_lrc_desc_v69() argument
527 struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69; in __get_lrc_desc_v69()
537 static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc) in guc_lrc_desc_pool_create_v69() argument
544 ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69, in guc_lrc_desc_pool_create_v69()
545 (void **)&guc->lrc_desc_pool_vaddr_v69); in guc_lrc_desc_pool_create_v69()
552 static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc) in guc_lrc_desc_pool_destroy_v69() argument
554 if (!guc->lrc_desc_pool_vaddr_v69) in guc_lrc_desc_pool_destroy_v69()
557 guc->lrc_desc_pool_vaddr_v69 = NULL; in guc_lrc_desc_pool_destroy_v69()
558 i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP); in guc_lrc_desc_pool_destroy_v69()
561 static inline bool guc_submission_initialized(struct intel_guc *guc) in guc_submission_initialized() argument
563 return guc->submission_initialized; in guc_submission_initialized()
566 static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id) in _reset_lrc_desc_v69() argument
568 struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id); in _reset_lrc_desc_v69()
574 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id) in ctx_id_mapped() argument
576 return __get_context(guc, id); in ctx_id_mapped()
579 static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id, in set_ctx_id_mapping() argument
588 xa_lock_irqsave(&guc->context_lookup, flags); in set_ctx_id_mapping()
589 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC); in set_ctx_id_mapping()
590 xa_unlock_irqrestore(&guc->context_lookup, flags); in set_ctx_id_mapping()
593 static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id) in clr_ctx_id_mapping() argument
597 if (unlikely(!guc_submission_initialized(guc))) in clr_ctx_id_mapping()
600 _reset_lrc_desc_v69(guc, id); in clr_ctx_id_mapping()
606 xa_lock_irqsave(&guc->context_lookup, flags); in clr_ctx_id_mapping()
607 __xa_erase(&guc->context_lookup, id); in clr_ctx_id_mapping()
608 xa_unlock_irqrestore(&guc->context_lookup, flags); in clr_ctx_id_mapping()
611 static void decr_outstanding_submission_g2h(struct intel_guc *guc) in decr_outstanding_submission_g2h() argument
613 if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) in decr_outstanding_submission_g2h()
614 wake_up_all(&guc->ct.wq); in decr_outstanding_submission_g2h()
617 static int guc_submission_send_busy_loop(struct intel_guc *guc, in guc_submission_send_busy_loop() argument
633 atomic_inc(&guc->outstanding_submission_g2h); in guc_submission_send_busy_loop()
635 ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); in guc_submission_send_busy_loop()
637 atomic_dec(&guc->outstanding_submission_g2h); in guc_submission_send_busy_loop()
642 int intel_guc_wait_for_pending_msg(struct intel_guc *guc, in intel_guc_wait_for_pending_msg() argument
661 prepare_to_wait(&guc->ct.wq, &wait, state); in intel_guc_wait_for_pending_msg()
678 finish_wait(&guc->ct.wq, &wait); in intel_guc_wait_for_pending_msg()
683 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) in intel_guc_wait_for_idle() argument
685 if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc)) in intel_guc_wait_for_idle()
688 return intel_guc_wait_for_pending_msg(guc, in intel_guc_wait_for_idle()
689 &guc->outstanding_submission_g2h, in intel_guc_wait_for_idle()
696 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) in __guc_add_request() argument
750 err = intel_guc_send_nb(guc, action, len, g2h_len_dw); in __guc_add_request()
753 atomic_inc(&guc->outstanding_submission_g2h); in __guc_add_request()
765 err = intel_guc_send_nb(guc, action, len - 1, 0); in __guc_add_request()
779 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) in guc_add_request() argument
781 int ret = __guc_add_request(guc, rq); in guc_add_request()
784 guc->stalled_request = rq; in guc_add_request()
785 guc->submission_stall_reason = STALL_ADD_REQUEST; in guc_add_request()
816 return (WQ_SIZE - ce->parallel.guc.wqi_tail); in wq_space_until_wrap()
828 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & in write_wqi()
830 WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail); in write_wqi()
845 ce->parallel.guc.wqi_tail = 0; in guc_wq_noop_append()
893 static int guc_wq_item_append(struct intel_guc *guc, in guc_wq_item_append() argument
904 guc->stalled_request = rq; in guc_wq_item_append()
905 guc->submission_stall_reason = STALL_MOVE_LRC_TAIL; in guc_wq_item_append()
927 static int guc_dequeue_one_context(struct intel_guc *guc) in guc_dequeue_one_context() argument
929 struct i915_sched_engine * const sched_engine = guc->sched_engine; in guc_dequeue_one_context()
937 if (guc->stalled_request) { in guc_dequeue_one_context()
939 last = guc->stalled_request; in guc_dequeue_one_context()
941 switch (guc->submission_stall_reason) { in guc_dequeue_one_context()
949 MISSING_CASE(guc->submission_stall_reason); in guc_dequeue_one_context()
992 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) && in guc_dequeue_one_context()
998 guc->stalled_request = last; in guc_dequeue_one_context()
999 guc->submission_stall_reason = in guc_dequeue_one_context()
1010 ret = guc_wq_item_append(guc, last); in guc_dequeue_one_context()
1022 ret = guc_add_request(guc, last); in guc_dequeue_one_context()
1033 guc->stalled_request = NULL; in guc_dequeue_one_context()
1034 guc->submission_stall_reason = STALL_NONE; in guc_dequeue_one_context()
1072 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1077 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) in scrub_guc_desc_for_outstanding_g2h() argument
1083 xa_lock_irqsave(&guc->context_lookup, flags); in scrub_guc_desc_for_outstanding_g2h()
1084 xa_for_each(&guc->context_lookup, index, ce) { in scrub_guc_desc_for_outstanding_g2h()
1092 xa_unlock(&guc->context_lookup); in scrub_guc_desc_for_outstanding_g2h()
1119 decr_outstanding_submission_g2h(guc); in scrub_guc_desc_for_outstanding_g2h()
1123 intel_gt_pm_put_async_untracked(guc_to_gt(guc)); in scrub_guc_desc_for_outstanding_g2h()
1124 release_guc_id(guc, ce); in scrub_guc_desc_for_outstanding_g2h()
1139 decr_outstanding_submission_g2h(guc); in scrub_guc_desc_for_outstanding_g2h()
1150 xa_lock(&guc->context_lookup); in scrub_guc_desc_for_outstanding_g2h()
1152 xa_unlock_irqrestore(&guc->context_lookup, flags); in scrub_guc_desc_for_outstanding_g2h()
1182 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) in __extend_last_switch() argument
1184 u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); in __extend_last_switch()
1185 u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp); in __extend_last_switch()
1248 struct intel_engine_guc_stats *stats = &engine->stats.guc; in guc_update_engine_gt_clks()
1249 struct intel_guc *guc = gt_to_guc(engine->gt); in guc_update_engine_gt_clks() local
1252 lockdep_assert_held(&guc->timestamp.lock); in guc_update_engine_gt_clks()
1258 __extend_last_switch(guc, &stats->start_gt_clk, last_switch); in guc_update_engine_gt_clks()
1284 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) in guc_update_pm_timestamp() argument
1286 struct intel_gt *gt = guc_to_gt(guc); in guc_update_pm_timestamp()
1290 lockdep_assert_held(&guc->timestamp.lock); in guc_update_pm_timestamp()
1292 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); in guc_update_pm_timestamp()
1294 MISC_STATUS1) >> guc->timestamp.shift; in guc_update_pm_timestamp()
1298 if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp)) in guc_update_pm_timestamp()
1301 guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo; in guc_update_pm_timestamp()
1311 struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; in guc_engine_busyness()
1314 struct intel_guc *guc = gt_to_guc(gt); in guc_engine_busyness() local
1321 spin_lock_irqsave(&guc->timestamp.lock, flags); in guc_engine_busyness()
1345 gt_stamp_saved = guc->timestamp.gt_stamp; in guc_engine_busyness()
1351 guc_update_pm_timestamp(guc, now); in guc_engine_busyness()
1355 guc->timestamp.gt_stamp = gt_stamp_saved; in guc_engine_busyness()
1361 u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; in guc_engine_busyness()
1366 spin_unlock_irqrestore(&guc->timestamp.lock, flags); in guc_engine_busyness()
1371 static void guc_enable_busyness_worker(struct intel_guc *guc) in guc_enable_busyness_worker() argument
1373 mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay); in guc_enable_busyness_worker()
1376 static void guc_cancel_busyness_worker(struct intel_guc *guc) in guc_cancel_busyness_worker() argument
1415 if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) || in guc_cancel_busyness_worker()
1416 test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags)) in guc_cancel_busyness_worker()
1417 cancel_delayed_work(&guc->timestamp.work); in guc_cancel_busyness_worker()
1419 cancel_delayed_work_sync(&guc->timestamp.work); in guc_cancel_busyness_worker()
1422 static void __reset_guc_busyness_stats(struct intel_guc *guc) in __reset_guc_busyness_stats() argument
1424 struct intel_gt *gt = guc_to_gt(guc); in __reset_guc_busyness_stats()
1430 spin_lock_irqsave(&guc->timestamp.lock, flags); in __reset_guc_busyness_stats()
1432 guc_update_pm_timestamp(guc, &unused); in __reset_guc_busyness_stats()
1435 engine->stats.guc.prev_total = 0; in __reset_guc_busyness_stats()
1438 spin_unlock_irqrestore(&guc->timestamp.lock, flags); in __reset_guc_busyness_stats()
1441 static void __update_guc_busyness_stats(struct intel_guc *guc) in __update_guc_busyness_stats() argument
1443 struct intel_gt *gt = guc_to_gt(guc); in __update_guc_busyness_stats()
1449 guc->timestamp.last_stat_jiffies = jiffies; in __update_guc_busyness_stats()
1451 spin_lock_irqsave(&guc->timestamp.lock, flags); in __update_guc_busyness_stats()
1453 guc_update_pm_timestamp(guc, &unused); in __update_guc_busyness_stats()
1457 spin_unlock_irqrestore(&guc->timestamp.lock, flags); in __update_guc_busyness_stats()
1462 struct intel_guc *guc = ce_to_guc(ce); in __guc_context_update_stats() local
1465 spin_lock_irqsave(&guc->timestamp.lock, flags); in __guc_context_update_stats()
1467 spin_unlock_irqrestore(&guc->timestamp.lock, flags); in __guc_context_update_stats()
1481 struct intel_guc *guc = container_of(wrk, typeof(*guc), in guc_timestamp_ping() local
1483 struct intel_uc *uc = container_of(guc, typeof(*uc), guc); in guc_timestamp_ping()
1484 struct intel_gt *gt = guc_to_gt(guc); in guc_timestamp_ping()
1530 __update_guc_busyness_stats(guc); in guc_timestamp_ping()
1533 xa_for_each(&guc->context_lookup, index, ce) in guc_timestamp_ping()
1538 guc_enable_busyness_worker(guc); in guc_timestamp_ping()
1544 static int guc_action_enable_usage_stats(struct intel_guc *guc) in guc_action_enable_usage_stats() argument
1546 u32 offset = intel_guc_engine_usage_offset(guc); in guc_action_enable_usage_stats()
1553 return intel_guc_send(guc, action, ARRAY_SIZE(action)); in guc_action_enable_usage_stats()
1556 static int guc_init_engine_stats(struct intel_guc *guc) in guc_init_engine_stats() argument
1558 struct intel_gt *gt = guc_to_gt(guc); in guc_init_engine_stats()
1563 ret = guc_action_enable_usage_stats(guc); in guc_init_engine_stats()
1566 guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret)); in guc_init_engine_stats()
1568 guc_enable_busyness_worker(guc); in guc_init_engine_stats()
1573 static void guc_fini_engine_stats(struct intel_guc *guc) in guc_fini_engine_stats() argument
1575 guc_cancel_busyness_worker(guc); in guc_fini_engine_stats()
1580 struct intel_guc *guc = gt_to_guc(gt); in intel_guc_busyness_park() local
1582 if (!guc_submission_initialized(guc)) in intel_guc_busyness_park()
1590 guc_cancel_busyness_worker(guc); in intel_guc_busyness_park()
1597 if (guc->timestamp.last_stat_jiffies && in intel_guc_busyness_park()
1598 !time_after(jiffies, guc->timestamp.last_stat_jiffies + in intel_guc_busyness_park()
1599 (guc->timestamp.ping_delay / 2))) in intel_guc_busyness_park()
1602 __update_guc_busyness_stats(guc); in intel_guc_busyness_park()
1607 struct intel_guc *guc = gt_to_guc(gt); in intel_guc_busyness_unpark() local
1611 if (!guc_submission_initialized(guc)) in intel_guc_busyness_unpark()
1614 spin_lock_irqsave(&guc->timestamp.lock, flags); in intel_guc_busyness_unpark()
1615 guc_update_pm_timestamp(guc, &unused); in intel_guc_busyness_unpark()
1616 spin_unlock_irqrestore(&guc->timestamp.lock, flags); in intel_guc_busyness_unpark()
1617 guc_enable_busyness_worker(guc); in intel_guc_busyness_unpark()
1621 submission_disabled(struct intel_guc *guc) in submission_disabled() argument
1623 struct i915_sched_engine * const sched_engine = guc->sched_engine; in submission_disabled()
1627 intel_gt_is_wedged(guc_to_gt(guc))); in submission_disabled()
1630 static void disable_submission(struct intel_guc *guc) in disable_submission() argument
1632 struct i915_sched_engine * const sched_engine = guc->sched_engine; in disable_submission()
1635 GEM_BUG_ON(!guc->ct.enabled); in disable_submission()
1641 static void enable_submission(struct intel_guc *guc) in enable_submission() argument
1643 struct i915_sched_engine * const sched_engine = guc->sched_engine; in enable_submission()
1646 spin_lock_irqsave(&guc->sched_engine->lock, flags); in enable_submission()
1651 GEM_BUG_ON(!guc->ct.enabled); in enable_submission()
1656 spin_unlock_irqrestore(&guc->sched_engine->lock, flags); in enable_submission()
1659 static void guc_flush_submissions(struct intel_guc *guc) in guc_flush_submissions() argument
1661 struct i915_sched_engine * const sched_engine = guc->sched_engine; in guc_flush_submissions()
1668 void intel_guc_submission_flush_work(struct intel_guc *guc) in intel_guc_submission_flush_work() argument
1670 flush_work(&guc->submission_state.destroyed_worker); in intel_guc_submission_flush_work()
1673 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1675 void intel_guc_submission_reset_prepare(struct intel_guc *guc) in intel_guc_submission_reset_prepare() argument
1677 if (unlikely(!guc_submission_initialized(guc))) { in intel_guc_submission_reset_prepare()
1682 intel_gt_park_heartbeats(guc_to_gt(guc)); in intel_guc_submission_reset_prepare()
1683 disable_submission(guc); in intel_guc_submission_reset_prepare()
1684 guc->interrupts.disable(guc); in intel_guc_submission_reset_prepare()
1685 __reset_guc_busyness_stats(guc); in intel_guc_submission_reset_prepare()
1688 spin_lock_irq(guc_to_gt(guc)->irq_lock); in intel_guc_submission_reset_prepare()
1689 spin_unlock_irq(guc_to_gt(guc)->irq_lock); in intel_guc_submission_reset_prepare()
1691 guc_flush_submissions(guc); in intel_guc_submission_reset_prepare()
1692 guc_flush_destroyed_contexts(guc); in intel_guc_submission_reset_prepare()
1693 flush_work(&guc->ct.requests.worker); in intel_guc_submission_reset_prepare()
1695 scrub_guc_desc_for_outstanding_g2h(guc); in intel_guc_submission_reset_prepare()
1859 void wake_up_all_tlb_invalidate(struct intel_guc *guc) in wake_up_all_tlb_invalidate() argument
1864 if (!intel_guc_tlb_invalidation_is_available(guc)) in wake_up_all_tlb_invalidate()
1867 xa_lock_irq(&guc->tlb_lookup); in wake_up_all_tlb_invalidate()
1868 xa_for_each(&guc->tlb_lookup, i, wait) in wake_up_all_tlb_invalidate()
1870 xa_unlock_irq(&guc->tlb_lookup); in wake_up_all_tlb_invalidate()
1873 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled) in intel_guc_submission_reset() argument
1879 if (unlikely(!guc_submission_initialized(guc))) { in intel_guc_submission_reset()
1884 xa_lock_irqsave(&guc->context_lookup, flags); in intel_guc_submission_reset()
1885 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_submission_reset()
1889 xa_unlock(&guc->context_lookup); in intel_guc_submission_reset()
1897 xa_lock(&guc->context_lookup); in intel_guc_submission_reset()
1899 xa_unlock_irqrestore(&guc->context_lookup, flags); in intel_guc_submission_reset()
1902 xa_destroy(&guc->context_lookup); in intel_guc_submission_reset()
1971 void intel_guc_submission_cancel_requests(struct intel_guc *guc) in intel_guc_submission_cancel_requests() argument
1977 xa_lock_irqsave(&guc->context_lookup, flags); in intel_guc_submission_cancel_requests()
1978 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_submission_cancel_requests()
1982 xa_unlock(&guc->context_lookup); in intel_guc_submission_cancel_requests()
1990 xa_lock(&guc->context_lookup); in intel_guc_submission_cancel_requests()
1992 xa_unlock_irqrestore(&guc->context_lookup, flags); in intel_guc_submission_cancel_requests()
1994 guc_cancel_sched_engine_requests(guc->sched_engine); in intel_guc_submission_cancel_requests()
1997 xa_destroy(&guc->context_lookup); in intel_guc_submission_cancel_requests()
2003 wake_up_all_tlb_invalidate(guc); in intel_guc_submission_cancel_requests()
2006 void intel_guc_submission_reset_finish(struct intel_guc *guc) in intel_guc_submission_reset_finish() argument
2009 if (unlikely(!guc_submission_initialized(guc) || in intel_guc_submission_reset_finish()
2010 !intel_guc_is_fw_running(guc) || in intel_guc_submission_reset_finish()
2011 intel_gt_is_wedged(guc_to_gt(guc)))) { in intel_guc_submission_reset_finish()
2021 if (atomic_read(&guc->outstanding_submission_g2h)) in intel_guc_submission_reset_finish()
2022 guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n"); in intel_guc_submission_reset_finish()
2023 atomic_set(&guc->outstanding_submission_g2h, 0); in intel_guc_submission_reset_finish()
2025 intel_guc_global_policies_update(guc); in intel_guc_submission_reset_finish()
2026 enable_submission(guc); in intel_guc_submission_reset_finish()
2027 intel_gt_unpark_heartbeats(guc_to_gt(guc)); in intel_guc_submission_reset_finish()
2033 wake_up_all_tlb_invalidate(guc); in intel_guc_submission_reset_finish()
2039 bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc) in intel_guc_tlb_invalidation_is_available() argument
2041 return HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915) && in intel_guc_tlb_invalidation_is_available()
2042 intel_guc_is_ready(guc); in intel_guc_tlb_invalidation_is_available()
2045 static int init_tlb_lookup(struct intel_guc *guc) in init_tlb_lookup() argument
2050 if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915)) in init_tlb_lookup()
2053 xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC); in init_tlb_lookup()
2062 err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait, in init_tlb_lookup()
2063 xa_limit_32b, &guc->next_seqno, GFP_KERNEL); in init_tlb_lookup()
2072 static void fini_tlb_lookup(struct intel_guc *guc) in fini_tlb_lookup() argument
2076 if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915)) in fini_tlb_lookup()
2079 wait = xa_load(&guc->tlb_lookup, guc->serial_slot); in fini_tlb_lookup()
2081 guc_err(guc, "Unexpected busy item in tlb_lookup on fini\n"); in fini_tlb_lookup()
2084 xa_destroy(&guc->tlb_lookup); in fini_tlb_lookup()
2091 int intel_guc_submission_init(struct intel_guc *guc) in intel_guc_submission_init() argument
2093 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_submission_init()
2096 if (guc->submission_initialized) in intel_guc_submission_init()
2099 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) { in intel_guc_submission_init()
2100 ret = guc_lrc_desc_pool_create_v69(guc); in intel_guc_submission_init()
2105 ret = init_tlb_lookup(guc); in intel_guc_submission_init()
2109 guc->submission_state.guc_ids_bitmap = in intel_guc_submission_init()
2110 bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); in intel_guc_submission_init()
2111 if (!guc->submission_state.guc_ids_bitmap) { in intel_guc_submission_init()
2116 guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; in intel_guc_submission_init()
2117 guc->timestamp.shift = gpm_timestamp_shift(gt); in intel_guc_submission_init()
2118 guc->submission_initialized = true; in intel_guc_submission_init()
2123 fini_tlb_lookup(guc); in intel_guc_submission_init()
2125 guc_lrc_desc_pool_destroy_v69(guc); in intel_guc_submission_init()
2129 void intel_guc_submission_fini(struct intel_guc *guc) in intel_guc_submission_fini() argument
2131 if (!guc->submission_initialized) in intel_guc_submission_fini()
2134 guc_fini_engine_stats(guc); in intel_guc_submission_fini()
2135 guc_flush_destroyed_contexts(guc); in intel_guc_submission_fini()
2136 guc_lrc_desc_pool_destroy_v69(guc); in intel_guc_submission_fini()
2137 i915_sched_engine_put(guc->sched_engine); in intel_guc_submission_fini()
2138 bitmap_free(guc->submission_state.guc_ids_bitmap); in intel_guc_submission_fini()
2139 fini_tlb_lookup(guc); in intel_guc_submission_fini()
2140 guc->submission_initialized = false; in intel_guc_submission_fini()
2154 static int guc_bypass_tasklet_submit(struct intel_guc *guc, in guc_bypass_tasklet_submit() argument
2165 ret = guc_wq_item_append(guc, rq); in guc_bypass_tasklet_submit()
2167 ret = guc_add_request(guc, rq); in guc_bypass_tasklet_submit()
2171 ret = guc_add_request(guc, rq); in guc_bypass_tasklet_submit()
2175 disable_submission(guc); in guc_bypass_tasklet_submit()
2180 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq) in need_tasklet() argument
2185 return submission_disabled(guc) || guc->stalled_request || in need_tasklet()
2187 !ctx_id_mapped(guc, ce->guc_id.id); in need_tasklet()
2193 struct intel_guc *guc = gt_to_guc(rq->engine->gt); in guc_submit_request() local
2199 if (need_tasklet(guc, rq)) in guc_submit_request()
2201 else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY) in guc_submit_request()
2207 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) in new_guc_id() argument
2214 ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap, in new_guc_id()
2215 NUMBER_MULTI_LRC_GUC_ID(guc), in new_guc_id()
2219 ret = ida_alloc_range(&guc->submission_state.guc_ids, in new_guc_id()
2220 NUMBER_MULTI_LRC_GUC_ID(guc), in new_guc_id()
2221 guc->submission_state.num_guc_ids - 1, in new_guc_id()
2227 ++guc->submission_state.guc_ids_in_use; in new_guc_id()
2233 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) in __release_guc_id() argument
2239 bitmap_release_region(guc->submission_state.guc_ids_bitmap, in __release_guc_id()
2244 --guc->submission_state.guc_ids_in_use; in __release_guc_id()
2245 ida_free(&guc->submission_state.guc_ids, in __release_guc_id()
2248 clr_ctx_id_mapping(guc, ce->guc_id.id); in __release_guc_id()
2255 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) in release_guc_id() argument
2259 spin_lock_irqsave(&guc->submission_state.lock, flags); in release_guc_id()
2260 __release_guc_id(guc, ce); in release_guc_id()
2261 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in release_guc_id()
2264 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce) in steal_guc_id() argument
2268 lockdep_assert_held(&guc->submission_state.lock); in steal_guc_id()
2272 if (!list_empty(&guc->submission_state.guc_id_list)) { in steal_guc_id()
2273 cn = list_first_entry(&guc->submission_state.guc_id_list, in steal_guc_id()
2292 guc->number_guc_id_stolen++; in steal_guc_id()
2301 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce) in assign_guc_id() argument
2305 lockdep_assert_held(&guc->submission_state.lock); in assign_guc_id()
2308 ret = new_guc_id(guc, ce); in assign_guc_id()
2313 ret = steal_guc_id(guc, ce); in assign_guc_id()
2330 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) in pin_guc_id() argument
2338 spin_lock_irqsave(&guc->submission_state.lock, flags); in pin_guc_id()
2343 ret = assign_guc_id(guc, ce); in pin_guc_id()
2353 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in pin_guc_id()
2373 intel_gt_retire_requests(guc_to_gt(guc)); in pin_guc_id()
2380 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) in unpin_guc_id() argument
2391 spin_lock_irqsave(&guc->submission_state.lock, flags); in unpin_guc_id()
2395 &guc->submission_state.guc_id_list); in unpin_guc_id()
2396 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in unpin_guc_id()
2399 static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc, in __guc_action_register_multi_lrc_v69() argument
2420 return guc_submission_send_busy_loop(guc, action, len, 0, loop); in __guc_action_register_multi_lrc_v69()
2423 static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc, in __guc_action_register_multi_lrc_v70() argument
2463 return guc_submission_send_busy_loop(guc, action, len, 0, loop); in __guc_action_register_multi_lrc_v70()
2466 static int __guc_action_register_context_v69(struct intel_guc *guc, in __guc_action_register_context_v69() argument
2477 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), in __guc_action_register_context_v69()
2481 static int __guc_action_register_context_v70(struct intel_guc *guc, in __guc_action_register_context_v70() argument
2500 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), in __guc_action_register_context_v70()
2509 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop) in register_context_v69() argument
2511 u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) + in register_context_v69()
2517 return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id, in register_context_v69()
2520 return __guc_action_register_context_v69(guc, ce->guc_id.id, in register_context_v69()
2525 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop) in register_context_v70() argument
2532 return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop); in register_context_v70()
2534 return __guc_action_register_context_v70(guc, &info, loop); in register_context_v70()
2539 struct intel_guc *guc = ce_to_guc(ce); in register_context() local
2545 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) in register_context()
2546 ret = register_context_v70(guc, ce, loop); in register_context()
2548 ret = register_context_v69(guc, ce, loop); in register_context()
2557 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) in register_context()
2564 static int __guc_action_deregister_context(struct intel_guc *guc, in __guc_action_deregister_context() argument
2572 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), in __guc_action_deregister_context()
2579 struct intel_guc *guc = ce_to_guc(ce); in deregister_context() local
2584 return __guc_action_deregister_context(guc, guc_id); in deregister_context()
2647 static int __guc_context_set_context_policies(struct intel_guc *guc, in __guc_context_set_context_policies() argument
2651 return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g, in MAKE_CONTEXT_POLICY_ADD()
2659 struct intel_guc *guc = gt_to_guc(engine->gt); in guc_context_policy_init_v70() local
2688 ret = __guc_context_set_context_policies(guc, &policy, loop); in guc_context_policy_init_v70()
2740 struct intel_guc *guc = gt_to_guc(engine->gt); in prepare_context_registration_info_v69() local
2751 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) != in prepare_context_registration_info_v69()
2754 desc = __get_lrc_desc_v69(guc, ctx_id); in prepare_context_registration_info_v69()
2770 ce->parallel.guc.wqi_tail = 0; in prepare_context_registration_info_v69()
2771 ce->parallel.guc.wqi_head = 0; in prepare_context_registration_info_v69()
2786 ce->parallel.guc.wq_head = &pdesc->head; in prepare_context_registration_info_v69()
2787 ce->parallel.guc.wq_tail = &pdesc->tail; in prepare_context_registration_info_v69()
2788 ce->parallel.guc.wq_status = &pdesc->wq_status; in prepare_context_registration_info_v69()
2791 desc = __get_lrc_desc_v69(guc, child->guc_id.id); in prepare_context_registration_info_v69()
2809 struct intel_guc *guc = gt_to_guc(engine->gt); in prepare_context_registration_info_v70() local
2818 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) != in prepare_context_registration_info_v70()
2843 ce->parallel.guc.wqi_tail = 0; in prepare_context_registration_info_v70()
2844 ce->parallel.guc.wqi_head = 0; in prepare_context_registration_info_v70()
2860 ce->parallel.guc.wq_head = &wq_desc->head; in prepare_context_registration_info_v70()
2861 ce->parallel.guc.wq_tail = &wq_desc->tail; in prepare_context_registration_info_v70()
2862 ce->parallel.guc.wq_status = &wq_desc->wq_status; in prepare_context_registration_info_v70()
2872 struct intel_guc *guc = gt_to_guc(engine->gt); in try_context_registration() local
2880 context_registered = ctx_id_mapped(guc, ctx_id); in try_context_registration()
2882 clr_ctx_id_mapping(guc, ctx_id); in try_context_registration()
2883 set_ctx_id_mapping(guc, ctx_id, ce); in try_context_registration()
2902 disabled = submission_disabled(guc); in try_context_registration()
2909 clr_ctx_id_mapping(guc, ctx_id); in try_context_registration()
2925 clr_ctx_id_mapping(guc, ctx_id); in try_context_registration()
2927 clr_ctx_id_mapping(guc, ctx_id); in try_context_registration()
2978 struct intel_guc *guc = ce_to_guc(ce); in guc_context_unpin() local
2981 unpin_guc_id(guc, ce); in guc_context_unpin()
2993 static void __guc_context_sched_enable(struct intel_guc *guc, in __guc_context_sched_enable() argument
3004 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), in __guc_context_sched_enable()
3008 static void __guc_context_sched_disable(struct intel_guc *guc, in __guc_context_sched_disable() argument
3023 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), in __guc_context_sched_disable()
3065 struct intel_guc *guc = ce_to_guc(ce); in guc_context_block() local
3079 if (unlikely(!enabled || submission_disabled(guc))) { in guc_context_block()
3097 __guc_context_sched_disable(guc, ce, guc_id); in guc_context_block()
3121 struct intel_guc *guc = ce_to_guc(ce); in guc_context_unblock() local
3132 if (unlikely(submission_disabled(guc) || in guc_context_unblock()
3148 __guc_context_sched_enable(guc, ce); in guc_context_unblock()
3175 static void __guc_context_set_preemption_timeout(struct intel_guc *guc, in __guc_context_set_preemption_timeout() argument
3179 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) { in __guc_context_set_preemption_timeout()
3184 __guc_context_set_context_policies(guc, &policy, true); in __guc_context_set_preemption_timeout()
3192 intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); in __guc_context_set_preemption_timeout()
3200 struct intel_guc *guc = ce_to_guc(ce); in guc_context_revoke() local
3208 guc_flush_submissions(guc); in guc_context_revoke()
3213 if (submission_disabled(guc) || in guc_context_revoke()
3237 __guc_context_set_preemption_timeout(guc, guc_id, in guc_context_revoke()
3239 __guc_context_sched_disable(guc, ce, guc_id); in guc_context_revoke()
3244 __guc_context_set_preemption_timeout(guc, in guc_context_revoke()
3251 static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce, in do_sched_disable() argument
3265 __guc_context_sched_disable(guc, ce, guc_id); in do_sched_disable()
3268 static bool bypass_sched_disable(struct intel_guc *guc, in bypass_sched_disable() argument
3274 if (submission_disabled(guc) || context_guc_id_invalid(ce) || in bypass_sched_disable()
3275 !ctx_id_mapped(guc, ce->guc_id.id)) { in bypass_sched_disable()
3287 struct intel_guc *guc = ce_to_guc(ce); in __delay_sched_disable() local
3292 if (bypass_sched_disable(guc, ce)) { in __delay_sched_disable()
3296 do_sched_disable(guc, ce, flags); in __delay_sched_disable()
3300 static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce) in guc_id_pressure() argument
3312 return guc->submission_state.guc_ids_in_use > in guc_id_pressure()
3313 guc->submission_state.sched_disable_gucid_threshold; in guc_id_pressure()
3318 struct intel_guc *guc = ce_to_guc(ce); in guc_context_sched_disable() local
3319 u64 delay = guc->submission_state.sched_disable_delay_ms; in guc_context_sched_disable()
3324 if (bypass_sched_disable(guc, ce)) { in guc_context_sched_disable()
3327 } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) && in guc_context_sched_disable()
3334 do_sched_disable(guc, ce, flags); in guc_context_sched_disable()
3353 struct intel_guc *guc = ce_to_guc(ce); in guc_lrc_desc_unpin() local
3354 struct intel_gt *gt = guc_to_gt(guc); in guc_lrc_desc_unpin()
3360 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id)); in guc_lrc_desc_unpin()
3361 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); in guc_lrc_desc_unpin()
3366 disabled = submission_disabled(guc); in guc_lrc_desc_unpin()
3379 release_guc_id(guc, ce); in guc_lrc_desc_unpin()
3428 static void guc_flush_destroyed_contexts(struct intel_guc *guc) in guc_flush_destroyed_contexts() argument
3433 GEM_BUG_ON(!submission_disabled(guc) && in guc_flush_destroyed_contexts()
3434 guc_submission_initialized(guc)); in guc_flush_destroyed_contexts()
3436 while (!list_empty(&guc->submission_state.destroyed_contexts)) { in guc_flush_destroyed_contexts()
3437 spin_lock_irqsave(&guc->submission_state.lock, flags); in guc_flush_destroyed_contexts()
3438 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts, in guc_flush_destroyed_contexts()
3443 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in guc_flush_destroyed_contexts()
3448 release_guc_id(guc, ce); in guc_flush_destroyed_contexts()
3453 static void deregister_destroyed_contexts(struct intel_guc *guc) in deregister_destroyed_contexts() argument
3458 while (!list_empty(&guc->submission_state.destroyed_contexts)) { in deregister_destroyed_contexts()
3459 spin_lock_irqsave(&guc->submission_state.lock, flags); in deregister_destroyed_contexts()
3460 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts, in deregister_destroyed_contexts()
3465 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in deregister_destroyed_contexts()
3478 spin_lock_irqsave(&guc->submission_state.lock, flags); in deregister_destroyed_contexts()
3480 &guc->submission_state.destroyed_contexts); in deregister_destroyed_contexts()
3481 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in deregister_destroyed_contexts()
3491 struct intel_guc *guc = container_of(w, struct intel_guc, in destroyed_worker_func() local
3493 struct intel_gt *gt = guc_to_gt(guc); in destroyed_worker_func()
3504 if (!intel_guc_is_ready(guc)) in destroyed_worker_func()
3508 deregister_destroyed_contexts(guc); in destroyed_worker_func()
3514 struct intel_guc *guc = ce_to_guc(ce); in guc_context_destroy() local
3523 spin_lock_irqsave(&guc->submission_state.lock, flags); in guc_context_destroy()
3524 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) || in guc_context_destroy()
3525 !ctx_id_mapped(guc, ce->guc_id.id); in guc_context_destroy()
3530 &guc->submission_state.destroyed_contexts); in guc_context_destroy()
3532 __release_guc_id(guc, ce); in guc_context_destroy()
3534 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in guc_context_destroy()
3545 queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker); in guc_context_destroy()
3553 static void __guc_context_set_prio(struct intel_guc *guc, in __guc_context_set_prio() argument
3556 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) { in __guc_context_set_prio()
3561 __guc_context_set_context_policies(guc, &policy, true); in __guc_context_set_prio()
3569 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); in __guc_context_set_prio()
3573 static void guc_context_set_prio(struct intel_guc *guc, in guc_context_set_prio() argument
3581 if (ce->guc_state.prio == prio || submission_disabled(guc) || in guc_context_set_prio()
3588 __guc_context_set_prio(guc, ce); in guc_context_set_prio()
3631 struct intel_guc *guc = &ce->engine->gt->uc.guc; in update_context_prio() local
3641 guc_context_set_prio(guc, ce, i); in update_context_prio()
3811 struct intel_guc *guc = ce_to_guc(ce); in guc_request_alloc() local
3860 guc_warn(guc, "timed out waiting on context sched close before realloc\n"); in guc_request_alloc()
3881 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */ in guc_request_alloc()
3888 disable_submission(guc); in guc_request_alloc()
3892 unpin_guc_id(guc, ce); in guc_request_alloc()
3946 struct intel_guc *guc = ce_to_guc(ce); in guc_virtual_context_unpin() local
3951 unpin_guc_id(guc, ce); in guc_virtual_context_unpin()
4016 struct intel_guc *guc = ce_to_guc(ce); in guc_parent_context_pin() local
4022 ret = pin_guc_id(guc, ce); in guc_parent_context_pin()
4042 struct intel_guc *guc = ce_to_guc(ce); in guc_parent_context_unpin() local
4049 unpin_guc_id(guc, ce); in guc_parent_context_unpin()
4383 static inline int guc_kernel_context_pin(struct intel_guc *guc, in guc_kernel_context_pin() argument
4396 ret = pin_guc_id(guc, ce); in guc_kernel_context_pin()
4407 unpin_guc_id(guc, ce); in guc_kernel_context_pin()
4412 static inline int guc_init_submission(struct intel_guc *guc) in guc_init_submission() argument
4414 struct intel_gt *gt = guc_to_gt(guc); in guc_init_submission()
4419 xa_destroy(&guc->context_lookup); in guc_init_submission()
4425 guc->stalled_request = NULL; in guc_init_submission()
4426 guc->submission_stall_reason = STALL_NONE; in guc_init_submission()
4440 int ret = guc_kernel_context_pin(guc, ce); in guc_init_submission()
4554 struct intel_guc *guc = sched_engine->private_data; in guc_sched_engine_destroy() local
4556 guc->sched_engine = NULL; in guc_sched_engine_destroy()
4564 struct intel_guc *guc = gt_to_guc(engine->gt); in intel_guc_submission_setup() local
4572 if (!guc->sched_engine) { in intel_guc_submission_setup()
4573 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL); in intel_guc_submission_setup()
4574 if (!guc->sched_engine) in intel_guc_submission_setup()
4577 guc->sched_engine->schedule = i915_schedule; in intel_guc_submission_setup()
4578 guc->sched_engine->disabled = guc_sched_engine_disabled; in intel_guc_submission_setup()
4579 guc->sched_engine->private_data = guc; in intel_guc_submission_setup()
4580 guc->sched_engine->destroy = guc_sched_engine_destroy; in intel_guc_submission_setup()
4581 guc->sched_engine->bump_inflight_request_prio = in intel_guc_submission_setup()
4583 guc->sched_engine->retire_inflight_request_prio = in intel_guc_submission_setup()
4585 tasklet_setup(&guc->sched_engine->tasklet, in intel_guc_submission_setup()
4589 engine->sched_engine = i915_sched_engine_get(guc->sched_engine); in intel_guc_submission_setup()
4647 static int __guc_action_set_scheduling_policies(struct intel_guc *guc, in __guc_action_set_scheduling_policies() argument
4652 ret = intel_guc_send(guc, (u32 *)&policy->h2g, in __guc_action_set_scheduling_policies()
4655 guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n", in __guc_action_set_scheduling_policies()
4661 guc_warn(guc, "global scheduler policy processed %d of %d KLVs!", in __guc_action_set_scheduling_policies()
4670 static int guc_init_global_schedule_policy(struct intel_guc *guc) in guc_init_global_schedule_policy() argument
4673 struct intel_gt *gt = guc_to_gt(guc); in guc_init_global_schedule_policy()
4677 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0)) in guc_init_global_schedule_policy()
4692 ret = __guc_action_set_scheduling_policies(guc, &policy); in guc_init_global_schedule_policy()
4698 static void guc_route_semaphores(struct intel_guc *guc, bool to_guc) in guc_route_semaphores() argument
4700 struct intel_gt *gt = guc_to_gt(guc); in guc_route_semaphores()
4714 int intel_guc_submission_enable(struct intel_guc *guc) in intel_guc_submission_enable() argument
4719 guc_route_semaphores(guc, true); in intel_guc_submission_enable()
4721 ret = guc_init_submission(guc); in intel_guc_submission_enable()
4725 ret = guc_init_engine_stats(guc); in intel_guc_submission_enable()
4729 ret = guc_init_global_schedule_policy(guc); in intel_guc_submission_enable()
4736 guc_fini_engine_stats(guc); in intel_guc_submission_enable()
4738 guc_route_semaphores(guc, false); in intel_guc_submission_enable()
4743 void intel_guc_submission_disable(struct intel_guc *guc) in intel_guc_submission_disable() argument
4745 guc_cancel_busyness_worker(guc); in intel_guc_submission_disable()
4748 guc_route_semaphores(guc, false); in intel_guc_submission_disable()
4751 static bool __guc_submission_supported(struct intel_guc *guc) in __guc_submission_supported() argument
4754 return intel_guc_is_supported(guc) && in __guc_submission_supported()
4755 GRAPHICS_VER(guc_to_i915(guc)) >= 11; in __guc_submission_supported()
4758 static bool __guc_submission_selected(struct intel_guc *guc) in __guc_submission_selected() argument
4760 struct drm_i915_private *i915 = guc_to_i915(guc); in __guc_submission_selected()
4762 if (!intel_guc_submission_is_supported(guc)) in __guc_submission_selected()
4768 int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc) in intel_guc_sched_disable_gucid_threshold_max() argument
4770 return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc); in intel_guc_sched_disable_gucid_threshold_max()
4786 (((intel_guc_sched_disable_gucid_threshold_max(guc)) * 3) / 4)
4788 void intel_guc_submission_init_early(struct intel_guc *guc) in intel_guc_submission_init_early() argument
4790 xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ); in intel_guc_submission_init_early()
4792 spin_lock_init(&guc->submission_state.lock); in intel_guc_submission_init_early()
4793 INIT_LIST_HEAD(&guc->submission_state.guc_id_list); in intel_guc_submission_init_early()
4794 ida_init(&guc->submission_state.guc_ids); in intel_guc_submission_init_early()
4795 INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts); in intel_guc_submission_init_early()
4796 INIT_WORK(&guc->submission_state.destroyed_worker, in intel_guc_submission_init_early()
4798 INIT_WORK(&guc->submission_state.reset_fail_worker, in intel_guc_submission_init_early()
4801 spin_lock_init(&guc->timestamp.lock); in intel_guc_submission_init_early()
4802 INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); in intel_guc_submission_init_early()
4804 guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS; in intel_guc_submission_init_early()
4805 guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID; in intel_guc_submission_init_early()
4806 guc->submission_state.sched_disable_gucid_threshold = in intel_guc_submission_init_early()
4807 NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc); in intel_guc_submission_init_early()
4808 guc->submission_supported = __guc_submission_supported(guc); in intel_guc_submission_init_early()
4809 guc->submission_selected = __guc_submission_selected(guc); in intel_guc_submission_init_early()
4813 g2h_context_lookup(struct intel_guc *guc, u32 ctx_id) in g2h_context_lookup() argument
4818 guc_err(guc, "Invalid ctx_id %u\n", ctx_id); in g2h_context_lookup()
4822 ce = __get_context(guc, ctx_id); in g2h_context_lookup()
4824 guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id); in g2h_context_lookup()
4829 guc_err(guc, "Context is child, ctx_id %u\n", ctx_id); in g2h_context_lookup()
4836 static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno) in wait_wake_outstanding_tlb_g2h() argument
4841 xa_lock_irqsave(&guc->tlb_lookup, flags); in wait_wake_outstanding_tlb_g2h()
4842 wait = xa_load(&guc->tlb_lookup, seqno); in wait_wake_outstanding_tlb_g2h()
4847 guc_dbg(guc, in wait_wake_outstanding_tlb_g2h()
4850 xa_unlock_irqrestore(&guc->tlb_lookup, flags); in wait_wake_outstanding_tlb_g2h()
4853 int intel_guc_tlb_invalidation_done(struct intel_guc *guc, in intel_guc_tlb_invalidation_done() argument
4859 wait_wake_outstanding_tlb_g2h(guc, payload[0]); in intel_guc_tlb_invalidation_done()
4895 static int guc_send_invalidate_tlb(struct intel_guc *guc, in guc_send_invalidate_tlb() argument
4899 struct intel_gt *gt = guc_to_gt(guc); in guc_send_invalidate_tlb()
4922 if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq, in guc_send_invalidate_tlb()
4923 xa_limit_32b, &guc->next_seqno, in guc_send_invalidate_tlb()
4926 xa_lock_irq(&guc->tlb_lookup); in guc_send_invalidate_tlb()
4927 wq = xa_load(&guc->tlb_lookup, guc->serial_slot); in guc_send_invalidate_tlb()
4930 guc->tlb_lookup.xa_lock); in guc_send_invalidate_tlb()
4938 xa_unlock_irq(&guc->tlb_lookup); in guc_send_invalidate_tlb()
4940 seqno = guc->serial_slot; in guc_send_invalidate_tlb()
4948 err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true); in guc_send_invalidate_tlb()
4960 guc_err(guc, in guc_send_invalidate_tlb()
4966 if (seqno != guc->serial_slot) in guc_send_invalidate_tlb()
4967 xa_erase_irq(&guc->tlb_lookup, seqno); in guc_send_invalidate_tlb()
4973 int intel_guc_invalidate_tlb_engines(struct intel_guc *guc) in intel_guc_invalidate_tlb_engines() argument
4975 return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES); in intel_guc_invalidate_tlb_engines()
4979 int intel_guc_invalidate_tlb_guc(struct intel_guc *guc) in intel_guc_invalidate_tlb_guc() argument
4981 return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC); in intel_guc_invalidate_tlb_guc()
4984 int intel_guc_deregister_done_process_msg(struct intel_guc *guc, in intel_guc_deregister_done_process_msg() argument
4992 guc_err(guc, "Invalid length %u\n", len); in intel_guc_deregister_done_process_msg()
4997 ce = g2h_context_lookup(guc, ctx_id); in intel_guc_deregister_done_process_msg()
5025 intel_gt_pm_put_async_untracked(guc_to_gt(guc)); in intel_guc_deregister_done_process_msg()
5026 release_guc_id(guc, ce); in intel_guc_deregister_done_process_msg()
5030 decr_outstanding_submission_g2h(guc); in intel_guc_deregister_done_process_msg()
5035 int intel_guc_sched_done_process_msg(struct intel_guc *guc, in intel_guc_sched_done_process_msg() argument
5044 guc_err(guc, "Invalid length %u\n", len); in intel_guc_sched_done_process_msg()
5049 ce = g2h_context_lookup(guc, ctx_id); in intel_guc_sched_done_process_msg()
5056 guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n", in intel_guc_sched_done_process_msg()
5107 decr_outstanding_submission_g2h(guc); in intel_guc_sched_done_process_msg()
5113 static void capture_error_state(struct intel_guc *guc, in capture_error_state() argument
5116 struct intel_gt *gt = guc_to_gt(guc); in capture_error_state()
5138 guc_warn(guc, "No matching physical engine capture for virtual engine context 0x%04X / %s", in capture_error_state()
5160 static void guc_handle_context_reset(struct intel_guc *guc, in guc_handle_context_reset() argument
5167 guc_dbg(guc, "%s context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n", in guc_handle_context_reset()
5174 capture_error_state(guc, ce); in guc_handle_context_reset()
5179 int intel_guc_context_reset_process_msg(struct intel_guc *guc, in intel_guc_context_reset_process_msg() argument
5187 guc_err(guc, "Invalid length %u", len); in intel_guc_context_reset_process_msg()
5199 xa_lock_irqsave(&guc->context_lookup, flags); in intel_guc_context_reset_process_msg()
5200 ce = g2h_context_lookup(guc, ctx_id); in intel_guc_context_reset_process_msg()
5203 xa_unlock_irqrestore(&guc->context_lookup, flags); in intel_guc_context_reset_process_msg()
5208 guc_handle_context_reset(guc, ce); in intel_guc_context_reset_process_msg()
5214 int intel_guc_error_capture_process_msg(struct intel_guc *guc, in intel_guc_error_capture_process_msg() argument
5220 guc_dbg(guc, "Invalid length %u", len); in intel_guc_error_capture_process_msg()
5226 guc_warn(guc, "No space for error capture"); in intel_guc_error_capture_process_msg()
5228 intel_guc_capture_process(guc); in intel_guc_error_capture_process_msg()
5234 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance) in intel_guc_lookup_engine() argument
5236 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_lookup_engine()
5247 struct intel_guc *guc = container_of(w, struct intel_guc, in reset_fail_worker_func() local
5249 struct intel_gt *gt = guc_to_gt(guc); in reset_fail_worker_func()
5253 spin_lock_irqsave(&guc->submission_state.lock, flags); in reset_fail_worker_func()
5254 reset_fail_mask = guc->submission_state.reset_fail_mask; in reset_fail_worker_func()
5255 guc->submission_state.reset_fail_mask = 0; in reset_fail_worker_func()
5256 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in reset_fail_worker_func()
5278 int intel_guc_engine_failure_process_msg(struct intel_guc *guc, in intel_guc_engine_failure_process_msg() argument
5287 guc_err(guc, "Invalid length %u", len); in intel_guc_engine_failure_process_msg()
5295 engine = intel_guc_lookup_engine(guc, guc_class, instance); in intel_guc_engine_failure_process_msg()
5297 guc_err(guc, "Invalid engine %d:%d", guc_class, instance); in intel_guc_engine_failure_process_msg()
5305 guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X", in intel_guc_engine_failure_process_msg()
5308 spin_lock_irqsave(&guc->submission_state.lock, flags); in intel_guc_engine_failure_process_msg()
5309 guc->submission_state.reset_fail_mask |= engine->mask; in intel_guc_engine_failure_process_msg()
5310 spin_unlock_irqrestore(&guc->submission_state.lock, flags); in intel_guc_engine_failure_process_msg()
5316 queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker); in intel_guc_engine_failure_process_msg()
5323 struct intel_guc *guc = gt_to_guc(engine->gt); in intel_guc_find_hung_context() local
5330 if (unlikely(!guc_submission_initialized(guc))) in intel_guc_find_hung_context()
5333 xa_lock_irqsave(&guc->context_lookup, flags); in intel_guc_find_hung_context()
5334 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_find_hung_context()
5340 xa_unlock(&guc->context_lookup); in intel_guc_find_hung_context()
5369 xa_lock(&guc->context_lookup); in intel_guc_find_hung_context()
5375 xa_lock(&guc->context_lookup); in intel_guc_find_hung_context()
5378 xa_unlock_irqrestore(&guc->context_lookup, flags); in intel_guc_find_hung_context()
5385 struct intel_guc *guc = gt_to_guc(engine->gt); in intel_guc_dump_active_requests() local
5391 if (unlikely(!guc_submission_initialized(guc))) in intel_guc_dump_active_requests()
5394 xa_lock_irqsave(&guc->context_lookup, flags); in intel_guc_dump_active_requests()
5395 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_dump_active_requests()
5399 xa_unlock(&guc->context_lookup); in intel_guc_dump_active_requests()
5419 xa_lock(&guc->context_lookup); in intel_guc_dump_active_requests()
5421 xa_unlock_irqrestore(&guc->context_lookup, flags); in intel_guc_dump_active_requests()
5424 void intel_guc_submission_print_info(struct intel_guc *guc, in intel_guc_submission_print_info() argument
5427 struct i915_sched_engine *sched_engine = guc->sched_engine; in intel_guc_submission_print_info()
5435 guc->submission_version.major, guc->submission_version.minor, in intel_guc_submission_print_info()
5436 guc->submission_version.patch); in intel_guc_submission_print_info()
5438 atomic_read(&guc->outstanding_submission_g2h)); in intel_guc_submission_print_info()
5491 void intel_guc_submission_print_context_info(struct intel_guc *guc, in intel_guc_submission_print_context_info() argument
5498 xa_lock_irqsave(&guc->context_lookup, flags); in intel_guc_submission_print_context_info()
5499 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_submission_print_context_info()
5511 if (ce->parallel.guc.wq_status) { in intel_guc_submission_print_context_info()
5513 READ_ONCE(*ce->parallel.guc.wq_head)); in intel_guc_submission_print_context_info()
5515 READ_ONCE(*ce->parallel.guc.wq_tail)); in intel_guc_submission_print_context_info()
5517 READ_ONCE(*ce->parallel.guc.wq_status)); in intel_guc_submission_print_context_info()
5535 xa_unlock_irqrestore(&guc->context_lookup, flags); in intel_guc_submission_print_context_info()
5829 struct intel_guc *guc; in guc_create_virtual() local
5837 guc = gt_to_guc(siblings[0]->gt); in guc_create_virtual()
5851 ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine); in guc_create_virtual()
5871 guc_dbg(guc, "duplicate %s entry in load balancer\n", in guc_create_virtual()
5881 guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n", in guc_create_virtual()