Lines matching refs: guc

41 static u32 guc_bo_ggtt_addr(struct xe_guc *guc,  in guc_bo_ggtt_addr()  argument
44 struct xe_device *xe = guc_to_xe(guc); in guc_bo_ggtt_addr()
48 xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc))); in guc_bo_ggtt_addr()
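
Note: guc_bo_ggtt_addr() (source lines 41-48 above) resolves a buffer object's GGTT address for the GuC and asserts it lies above the WOPCM carve-out, which the GuC reserves for itself. A minimal, self-contained sketch of that range check; WOPCM_SIZE and GGTT_END are illustrative stand-ins, not the driver's real helpers:

#include <assert.h>
#include <stdint.h>

#define WOPCM_SIZE (2ull * 1024 * 1024)  /* stand-in for xe_wopcm_size() */
#define GGTT_END   (1ull << 32)          /* GuC addresses are 32-bit */

static uint32_t bo_ggtt_addr(uint64_t addr, uint64_t size)
{
        /* the GuC must not be handed addresses inside WOPCM or above 4 GiB */
        assert(addr >= WOPCM_SIZE);
        assert(addr + size <= GGTT_END);
        return (uint32_t)addr;
}
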
55 static u32 guc_ctl_debug_flags(struct xe_guc *guc) in guc_ctl_debug_flags() argument
57 u32 level = xe_guc_log_get_level(&guc->log); in guc_ctl_debug_flags()
69 static u32 guc_ctl_feature_flags(struct xe_guc *guc) in guc_ctl_feature_flags() argument
73 if (!guc_to_xe(guc)->info.skip_guc_pc) in guc_ctl_feature_flags()
79 static u32 guc_ctl_log_params_flags(struct xe_guc *guc) in guc_ctl_log_params_flags() argument
81 u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
132 static u32 guc_ctl_ads_flags(struct xe_guc *guc) in guc_ctl_ads_flags() argument
134 u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT; in guc_ctl_ads_flags()
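
Note: the guc_ctl_*_flags() helpers pack page-shifted GGTT offsets of the log and ADS buffers into 32-bit control dwords (the ">> PAGE_SHIFT" at source lines 81 and 134). A trivial sketch of that packing, assuming page-aligned input:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static uint32_t pack_ggtt_page(uint64_t ggtt_addr)
{
        /* a page-aligned GGTT address becomes a page frame number */
        assert((ggtt_addr & ((1ull << PAGE_SHIFT) - 1)) == 0);
        return (uint32_t)(ggtt_addr >> PAGE_SHIFT);
}
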
140 static u32 guc_ctl_wa_flags(struct xe_guc *guc) in guc_ctl_wa_flags() argument
142 struct xe_device *xe = guc_to_xe(guc); in guc_ctl_wa_flags()
143 struct xe_gt *gt = guc_to_gt(guc); in guc_ctl_wa_flags()
179 static u32 guc_ctl_devid(struct xe_guc *guc) in guc_ctl_devid() argument
181 struct xe_device *xe = guc_to_xe(guc); in guc_ctl_devid()
186 static void guc_print_params(struct xe_guc *guc) in guc_print_params() argument
188 struct xe_gt *gt = guc_to_gt(guc); in guc_print_params()
189 u32 *params = guc->params; in guc_print_params()
192 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); in guc_print_params()
199 static void guc_init_params(struct xe_guc *guc) in guc_init_params() argument
201 u32 *params = guc->params; in guc_init_params()
203 params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); in guc_init_params()
205 params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); in guc_init_params()
206 params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); in guc_init_params()
208 params[GUC_CTL_DEVID] = guc_ctl_devid(guc); in guc_init_params()
210 guc_print_params(guc); in guc_init_params()
213 static void guc_init_params_post_hwconfig(struct xe_guc *guc) in guc_init_params_post_hwconfig() argument
215 u32 *params = guc->params; in guc_init_params_post_hwconfig()
217 params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); in guc_init_params_post_hwconfig()
218 params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); in guc_init_params_post_hwconfig()
219 params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); in guc_init_params_post_hwconfig()
220 params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); in guc_init_params_post_hwconfig()
221 params[GUC_CTL_WA] = guc_ctl_wa_flags(guc); in guc_init_params_post_hwconfig()
222 params[GUC_CTL_DEVID] = guc_ctl_devid(guc); in guc_init_params_post_hwconfig()
224 guc_print_params(guc); in guc_init_params_post_hwconfig()
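
Note: guc_init_params() (source lines 199-210) fills only the control dwords that are valid before the GuC hwconfig is available, while guc_init_params_post_hwconfig() (source lines 213-224) refreshes them all and adds the feature and workaround flags. A compact sketch of that two-pass fill; the enum indices are illustrative, not the real GUC_CTL_* values:

#include <stdint.h>

enum guc_ctl {
        CTL_LOG_PARAMS, CTL_FEATURE, CTL_DEBUG,
        CTL_ADS, CTL_WA, CTL_DEVID, CTL_MAX_DWORDS,
};

static void init_params(uint32_t params[CTL_MAX_DWORDS], int post_hwconfig)
{
        params[CTL_LOG_PARAMS] = 0;  /* guc_ctl_log_params_flags() */
        params[CTL_DEBUG]      = 0;  /* guc_ctl_debug_flags() */
        params[CTL_ADS]        = 0;  /* guc_ctl_ads_flags() */
        params[CTL_DEVID]      = 0;  /* guc_ctl_devid() */
        if (post_hwconfig) {
                params[CTL_FEATURE] = 0;  /* guc_ctl_feature_flags() */
                params[CTL_WA]      = 0;  /* guc_ctl_wa_flags() */
        }
}
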
232 static void guc_write_params(struct xe_guc *guc) in guc_write_params() argument
234 struct xe_gt *gt = guc_to_gt(guc); in guc_write_params()
242 xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]); in guc_write_params()
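
Note: the loop at source line 242 hands the finished array to the firmware through scratch registers, offset by one so SOFT_SCRATCH(0) stays free for the firmware's own use. A stand-alone sketch, with an illustrative register offset and a stubbed MMIO accessor:

#include <stdint.h>

#define GUC_CTL_MAX_DWORDS 14                /* illustrative count */
#define SOFT_SCRATCH(n) (0xc180 + (n) * 4)   /* offset is illustrative */

static void mmio_write32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void write_params(const uint32_t params[GUC_CTL_MAX_DWORDS])
{
        int i;

        /* SOFT_SCRATCH(0) is skipped; parameter i goes to SOFT_SCRATCH(1 + i) */
        mmio_write32(SOFT_SCRATCH(0), 0);
        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                mmio_write32(SOFT_SCRATCH(1 + i), params[i]);
}
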
247 struct xe_guc *guc = arg; in guc_fini_hw() local
248 struct xe_gt *gt = guc_to_gt(guc); in guc_fini_hw()
251 xe_uc_fini_hw(&guc_to_gt(guc)->uc); in guc_fini_hw()
261 void xe_guc_comm_init_early(struct xe_guc *guc) in xe_guc_comm_init_early() argument
263 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_comm_init_early()
266 guc->notify_reg = MED_GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
268 guc->notify_reg = GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
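
Note: xe_guc_comm_init_early() (source lines 261-268) picks which doorbell register the host will ring to notify the GuC: media GTs get MED_GUC_HOST_INTERRUPT, primary GTs GUC_HOST_INTERRUPT. A sketch of the selection, with made-up offsets:

#include <stdint.h>

#define GUC_HOST_INTERRUPT     0x1901f0  /* offsets are illustrative */
#define MED_GUC_HOST_INTERRUPT 0x190304

static uint32_t pick_notify_reg(int is_media_gt)
{
        return is_media_gt ? MED_GUC_HOST_INTERRUPT : GUC_HOST_INTERRUPT;
}
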
271 static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc) in xe_guc_realloc_post_hwconfig() argument
273 struct xe_tile *tile = gt_to_tile(guc_to_gt(guc)); in xe_guc_realloc_post_hwconfig()
274 struct xe_device *xe = guc_to_xe(guc); in xe_guc_realloc_post_hwconfig()
277 if (!IS_DGFX(guc_to_xe(guc))) in xe_guc_realloc_post_hwconfig()
280 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo); in xe_guc_realloc_post_hwconfig()
284 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo); in xe_guc_realloc_post_hwconfig()
288 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo); in xe_guc_realloc_post_hwconfig()
292 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo); in xe_guc_realloc_post_hwconfig()
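
Note: xe_guc_realloc_post_hwconfig() (source lines 271-292) only does work on discrete GPUs: the firmware, log, ADS and CT buffers first allocated in system memory are re-created in VRAM once the hardware config is known. A sketch of that pattern, with reinit_in_vram() standing in for xe_managed_bo_reinit_in_vram():

#include <stddef.h>

static int reinit_in_vram(void **bo) { (void)bo; return 0; }  /* stand-in */

static int realloc_post_hwconfig(int is_dgfx, void **bos, size_t n)
{
        size_t i;
        int ret;

        if (!is_dgfx)  /* integrated parts keep the buffers in system memory */
                return 0;

        for (i = 0; i < n; i++) {
                ret = reinit_in_vram(&bos[i]);
                if (ret)
                        return ret;
        }
        return 0;
}
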
299 static int vf_guc_init(struct xe_guc *guc) in vf_guc_init() argument
303 xe_guc_comm_init_early(guc); in vf_guc_init()
305 err = xe_guc_ct_init(&guc->ct); in vf_guc_init()
309 err = xe_guc_relay_init(&guc->relay); in vf_guc_init()
316 int xe_guc_init(struct xe_guc *guc) in xe_guc_init() argument
318 struct xe_device *xe = guc_to_xe(guc); in xe_guc_init()
319 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_init()
322 guc->fw.type = XE_UC_FW_TYPE_GUC; in xe_guc_init()
323 ret = xe_uc_fw_init(&guc->fw); in xe_guc_init()
327 if (!xe_uc_fw_is_enabled(&guc->fw)) in xe_guc_init()
331 ret = vf_guc_init(guc); in xe_guc_init()
337 ret = xe_guc_log_init(&guc->log); in xe_guc_init()
341 ret = xe_guc_ads_init(&guc->ads); in xe_guc_init()
345 ret = xe_guc_ct_init(&guc->ct); in xe_guc_init()
349 ret = xe_guc_relay_init(&guc->relay); in xe_guc_init()
353 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); in xe_guc_init()
355 ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc); in xe_guc_init()
359 guc_init_params(guc); in xe_guc_init()
361 xe_guc_comm_init_early(guc); in xe_guc_init()
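
Note: xe_guc_init() (source lines 316-361) brings the sub-units up in dependency order (firmware, log, ADS, CT, relay) and then registers guc_fini_hw as a devm action, so teardown runs automatically when the device is released. The same ladder, reduced to stubs:

static int fw_init(void)         { return 0; }  /* xe_uc_fw_init() */
static int log_init(void)        { return 0; }  /* xe_guc_log_init() */
static int ads_init(void)        { return 0; }  /* xe_guc_ads_init() */
static int ct_init(void)         { return 0; }  /* xe_guc_ct_init() */
static int relay_init(void)      { return 0; }  /* xe_guc_relay_init() */
static int add_fini_action(void) { return 0; }  /* devm_add_action_or_reset() */

static int guc_init_sketch(void)
{
        int ret;

        ret = fw_init();
        if (ret)
                return ret;
        ret = log_init();
        if (ret)
                return ret;
        ret = ads_init();
        if (ret)
                return ret;
        ret = ct_init();
        if (ret)
                return ret;
        ret = relay_init();
        if (ret)
                return ret;
        /* one registration undoes all of the above on device release */
        return add_fini_action();
}
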
370 static int vf_guc_init_post_hwconfig(struct xe_guc *guc) in vf_guc_init_post_hwconfig() argument
374 err = xe_guc_submit_init(guc, xe_gt_sriov_vf_guc_ids(guc_to_gt(guc))); in vf_guc_init_post_hwconfig()
389 int xe_guc_init_post_hwconfig(struct xe_guc *guc) in xe_guc_init_post_hwconfig() argument
393 if (IS_SRIOV_VF(guc_to_xe(guc))) in xe_guc_init_post_hwconfig()
394 return vf_guc_init_post_hwconfig(guc); in xe_guc_init_post_hwconfig()
396 ret = xe_guc_realloc_post_hwconfig(guc); in xe_guc_init_post_hwconfig()
400 guc_init_params_post_hwconfig(guc); in xe_guc_init_post_hwconfig()
402 ret = xe_guc_submit_init(guc, ~0); in xe_guc_init_post_hwconfig()
406 ret = xe_guc_db_mgr_init(&guc->dbm, ~0); in xe_guc_init_post_hwconfig()
410 ret = xe_guc_pc_init(&guc->pc); in xe_guc_init_post_hwconfig()
414 return xe_guc_ads_init_post_hwconfig(&guc->ads); in xe_guc_init_post_hwconfig()
417 int xe_guc_post_load_init(struct xe_guc *guc) in xe_guc_post_load_init() argument
419 xe_guc_ads_populate_post_load(&guc->ads); in xe_guc_post_load_init()
420 guc->submission_state.enabled = true; in xe_guc_post_load_init()
425 int xe_guc_reset(struct xe_guc *guc) in xe_guc_reset() argument
427 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_reset()
459 static void guc_prepare_xfer(struct xe_guc *guc) in guc_prepare_xfer() argument
461 struct xe_gt *gt = guc_to_gt(guc); in guc_prepare_xfer()
462 struct xe_device *xe = guc_to_xe(guc); in guc_prepare_xfer()
487 static int guc_xfer_rsa(struct xe_guc *guc) in guc_xfer_rsa() argument
489 struct xe_gt *gt = guc_to_gt(guc); in guc_xfer_rsa()
494 if (guc->fw.rsa_size > 256) { in guc_xfer_rsa()
495 u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) + in guc_xfer_rsa()
496 xe_uc_fw_rsa_offset(&guc->fw); in guc_xfer_rsa()
501 copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa)); in guc_xfer_rsa()
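
Note: guc_xfer_rsa() (source lines 487-501) has two paths for handing over the firmware's RSA signature: keys larger than 256 bytes are passed by GGTT address only, smaller ones are copied dword by dword into the UOS_RSA_SCRATCH registers. A sketch with illustrative register offsets:

#include <stddef.h>
#include <stdint.h>

#define RSA_SCRATCH(n)    (0xc200 + (n) * 4)  /* offset is illustrative */
#define RSA_SCRATCH_COUNT 64                  /* 64 dwords == 256 bytes */

static void mmio_write32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void xfer_rsa(size_t rsa_size, uint32_t rsa_ggtt_addr,
                     const uint32_t *rsa_dwords)
{
        size_t i;

        if (rsa_size > 256) {
                /* big keys are passed by reference: GGTT address only */
                mmio_write32(RSA_SCRATCH(0), rsa_ggtt_addr);
                return;
        }
        /* small keys are copied wholesale into the scratch registers */
        for (i = 0; i < RSA_SCRATCH_COUNT; i++)
                mmio_write32(RSA_SCRATCH(i), rsa_dwords[i]);
}
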
593 static void guc_wait_ucode(struct xe_guc *guc) in guc_wait_ucode() argument
595 struct xe_gt *gt = guc_to_gt(guc); in guc_wait_ucode()
596 struct xe_guc_pc *guc_pc = &gt->uc.guc.pc; in guc_wait_ucode()
701 static int __xe_guc_upload(struct xe_guc *guc) in __xe_guc_upload() argument
706 xe_guc_pc_raise_unslice(&guc->pc); in __xe_guc_upload()
708 guc_write_params(guc); in __xe_guc_upload()
709 guc_prepare_xfer(guc); in __xe_guc_upload()
719 ret = guc_xfer_rsa(guc); in __xe_guc_upload()
726 ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE); in __xe_guc_upload()
731 guc_wait_ucode(guc); in __xe_guc_upload()
733 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING); in __xe_guc_upload()
737 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL); in __xe_guc_upload()
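
Note: __xe_guc_upload() (source lines 701-737) is the actual load sequence; each step in the stub-level sketch below maps to a call visible above:

static void raise_gt_freq(void)    {}            /* xe_guc_pc_raise_unslice() */
static void write_ctl_params(void) {}            /* guc_write_params() */
static void prepare_xfer(void)     {}            /* guc_prepare_xfer() */
static int  xfer_rsa_key(void)     { return 0; } /* guc_xfer_rsa() */
static int  dma_upload(void)       { return 0; } /* xe_uc_fw_upload(..., 0x2000, UOS_MOVE) */
static void wait_ucode(void)       {}            /* guc_wait_ucode() */
static void mark_running(void)     {}            /* XE_UC_FIRMWARE_RUNNING */
static void mark_load_fail(void)   {}            /* XE_UC_FIRMWARE_LOAD_FAIL */

static int upload_sketch(void)
{
        int ret;

        raise_gt_freq();  /* loading at a higher GT frequency is faster */
        write_ctl_params();
        prepare_xfer();

        ret = xfer_rsa_key();
        if (ret)
                goto out;
        ret = dma_upload();
        if (ret)
                goto out;

        wait_ucode();
        mark_running();
        return 0;
out:
        mark_load_fail();
        return ret;
}
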
741 static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc) in vf_guc_min_load_for_hwconfig() argument
743 struct xe_gt *gt = guc_to_gt(guc); in vf_guc_min_load_for_hwconfig()
754 ret = xe_guc_hwconfig_init(guc); in vf_guc_min_load_for_hwconfig()
758 ret = xe_guc_enable_communication(guc); in vf_guc_min_load_for_hwconfig()
784 int xe_guc_min_load_for_hwconfig(struct xe_guc *guc) in xe_guc_min_load_for_hwconfig() argument
788 if (IS_SRIOV_VF(guc_to_xe(guc))) in xe_guc_min_load_for_hwconfig()
789 return vf_guc_min_load_for_hwconfig(guc); in xe_guc_min_load_for_hwconfig()
791 xe_guc_ads_populate_minimal(&guc->ads); in xe_guc_min_load_for_hwconfig()
793 xe_guc_pc_init_early(&guc->pc); in xe_guc_min_load_for_hwconfig()
795 ret = __xe_guc_upload(guc); in xe_guc_min_load_for_hwconfig()
799 ret = xe_guc_hwconfig_init(guc); in xe_guc_min_load_for_hwconfig()
803 ret = xe_guc_enable_communication(guc); in xe_guc_min_load_for_hwconfig()
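
Note: xe_guc_min_load_for_hwconfig() (source lines 784-803) bootstraps with a minimal ADS so the firmware can come up just far enough to expose the hwconfig blob, then enables CT-based communication. A sketch of that order, with stubs standing in for the xe_guc_* calls named above:

static void ads_populate_minimal(void) {}            /* xe_guc_ads_populate_minimal() */
static void pc_init_early(void)        {}            /* xe_guc_pc_init_early() */
static int  upload_fw(void)            { return 0; } /* __xe_guc_upload() */
static int  hwconfig_init(void)        { return 0; } /* xe_guc_hwconfig_init() */
static int  enable_communication(void) { return 0; } /* xe_guc_enable_communication() */

static int min_load_for_hwconfig(void)
{
        int ret;

        ads_populate_minimal();
        pc_init_early();

        ret = upload_fw();      /* first load, with the minimal ADS */
        if (ret)
                return ret;
        ret = hwconfig_init();  /* read back the hwconfig blob */
        if (ret)
                return ret;
        return enable_communication();
}
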
810 int xe_guc_upload(struct xe_guc *guc) in xe_guc_upload() argument
812 xe_guc_ads_populate(&guc->ads); in xe_guc_upload()
814 return __xe_guc_upload(guc); in xe_guc_upload()
817 static void guc_handle_mmio_msg(struct xe_guc *guc) in guc_handle_mmio_msg() argument
819 struct xe_gt *gt = guc_to_gt(guc); in guc_handle_mmio_msg()
822 if (IS_SRIOV_VF(guc_to_xe(guc))) in guc_handle_mmio_msg()
839 static void guc_enable_irq(struct xe_guc *guc) in guc_enable_irq() argument
841 struct xe_gt *gt = guc_to_gt(guc); in guc_enable_irq()
857 int xe_guc_enable_communication(struct xe_guc *guc) in xe_guc_enable_communication() argument
859 struct xe_device *xe = guc_to_xe(guc); in xe_guc_enable_communication()
863 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_enable_communication()
866 err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc); in xe_guc_enable_communication()
870 guc_enable_irq(guc); in xe_guc_enable_communication()
873 err = xe_guc_ct_enable(&guc->ct); in xe_guc_enable_communication()
877 guc_handle_mmio_msg(guc); in xe_guc_enable_communication()
882 int xe_guc_suspend(struct xe_guc *guc) in xe_guc_suspend() argument
884 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_suspend()
890 ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action)); in xe_guc_suspend()
896 xe_guc_sanitize(guc); in xe_guc_suspend()
900 void xe_guc_notify(struct xe_guc *guc) in xe_guc_notify() argument
902 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_notify()
910 xe_mmio_write32(gt, guc->notify_reg, default_notify_data); in xe_guc_notify()
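
Note: xe_guc_notify() (source line 910) is the doorbell itself: one MMIO write of a zero payload to the register chosen in xe_guc_comm_init_early(). Sketch:

#include <stdint.h>

static void mmio_write32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void notify(uint32_t notify_reg)
{
        const uint32_t default_notify_data = 0;  /* the driver sends zero */

        mmio_write32(notify_reg, default_notify_data);
}
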
913 int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr) in xe_guc_auth_huc() argument
920 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in xe_guc_auth_huc()
923 int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, in xe_guc_mmio_send_recv() argument
926 struct xe_device *xe = guc_to_xe(guc); in xe_guc_mmio_send_recv()
927 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_mmio_send_recv()
937 xe_assert(xe, !xe_guc_ct_enabled(&guc->ct)); in xe_guc_mmio_send_recv()
960 xe_guc_notify(guc); in xe_guc_mmio_send_recv()
1043 int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len) in xe_guc_mmio_send() argument
1045 return xe_guc_mmio_send_recv(guc, request, len, NULL); in xe_guc_mmio_send()
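
Note: xe_guc_mmio_send_recv() (source lines 923-1043) is the slow path used while the CT buffers are down (hence the assert at source line 937): the request dwords go into scratch registers, the doorbell is rung, and the first register is polled for the reply. A heavily simplified sketch; the register offset, the success code, and the retry budget are all stand-ins, not the driver's ABI:

#include <stddef.h>
#include <stdint.h>

#define SCRATCH(n)    (0x190240 + (n) * 4)  /* offset is illustrative */
#define REPLY_SUCCESS 0xf0u                 /* stand-in for the header check */

static void mmio_write32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }
static uint32_t mmio_read32(uint32_t reg) { (void)reg; return REPLY_SUCCESS; }
static void ring_doorbell(void) {}          /* xe_guc_notify() */

static int mmio_send_recv(const uint32_t *request, size_t len, uint32_t *response)
{
        unsigned int tries = 1000;
        uint32_t header;
        size_t i;

        for (i = 0; i < len; i++)
                mmio_write32(SCRATCH(i), request[i]);

        ring_doorbell();

        /* the real code polls with a timeout and retries while the GuC is busy */
        do {
                header = mmio_read32(SCRATCH(0));
        } while (header != REPLY_SUCCESS && --tries);
        if (!tries)
                return -1;  /* -ETIMEDOUT in the driver */

        if (response)
                *response = header;
        return 0;
}
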
1048 static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val) in guc_self_cfg() argument
1050 struct xe_device *xe = guc_to_xe(guc); in guc_self_cfg()
1069 ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); in guc_self_cfg()
1081 int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val) in xe_guc_self_cfg32() argument
1083 return guc_self_cfg(guc, key, 1, val); in xe_guc_self_cfg32()
1086 int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val) in xe_guc_self_cfg64() argument
1088 return guc_self_cfg(guc, key, 2, val); in xe_guc_self_cfg64()
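
Note: guc_self_cfg() (source lines 1048-1069) and the two wrappers at source lines 1081-1088 push a single key/value config into the GuC: len is the value's size in dwords (1 for 32-bit, 2 for 64-bit) and a 64-bit value is split into low/high halves. The request layout below is illustrative, not the exact HOST2GUC_SELF_CFG ABI:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static int mmio_send(const uint32_t *req, size_t n) { (void)req; (void)n; return 0; }

static int self_cfg(uint16_t key, uint16_t len, uint64_t val)
{
        assert(len == 1 || len == 2);
        assert(len == 2 || val <= UINT32_MAX);  /* a 32-bit value must fit */

        uint32_t request[] = {
                (uint32_t)key << 16 | len,      /* key + dword count, packed */
                (uint32_t)(val & 0xffffffffu),  /* low half / 32-bit value */
                (uint32_t)(val >> 32),          /* high half, len == 2 only */
        };
        return mmio_send(request, sizeof(request) / sizeof(request[0]));
}
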
1091 void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir) in xe_guc_irq_handler() argument
1094 xe_guc_ct_irq_handler(&guc->ct); in xe_guc_irq_handler()
1097 void xe_guc_sanitize(struct xe_guc *guc) in xe_guc_sanitize() argument
1099 xe_uc_fw_sanitize(&guc->fw); in xe_guc_sanitize()
1100 xe_guc_ct_disable(&guc->ct); in xe_guc_sanitize()
1101 guc->submission_state.enabled = false; in xe_guc_sanitize()
1104 int xe_guc_reset_prepare(struct xe_guc *guc) in xe_guc_reset_prepare() argument
1106 return xe_guc_submit_reset_prepare(guc); in xe_guc_reset_prepare()
1109 void xe_guc_reset_wait(struct xe_guc *guc) in xe_guc_reset_wait() argument
1111 xe_guc_submit_reset_wait(guc); in xe_guc_reset_wait()
1114 void xe_guc_stop_prepare(struct xe_guc *guc) in xe_guc_stop_prepare() argument
1116 if (!IS_SRIOV_VF(guc_to_xe(guc))) { in xe_guc_stop_prepare()
1119 err = xe_guc_pc_stop(&guc->pc); in xe_guc_stop_prepare()
1120 xe_gt_WARN(guc_to_gt(guc), err, "Failed to stop GuC PC: %pe\n", in xe_guc_stop_prepare()
1125 void xe_guc_stop(struct xe_guc *guc) in xe_guc_stop() argument
1127 xe_guc_ct_stop(&guc->ct); in xe_guc_stop()
1129 xe_guc_submit_stop(guc); in xe_guc_stop()
1132 int xe_guc_start(struct xe_guc *guc) in xe_guc_start() argument
1134 if (!IS_SRIOV_VF(guc_to_xe(guc))) { in xe_guc_start()
1137 err = xe_guc_pc_start(&guc->pc); in xe_guc_start()
1138 xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n", in xe_guc_start()
1142 return xe_guc_submit_start(guc); in xe_guc_start()
1145 void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) in xe_guc_print_info() argument
1147 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_print_info()
1152 xe_uc_fw_print(&guc->fw, p); in xe_guc_print_info()
1168 xe_guc_log_get_level(&guc->log)); in xe_guc_print_info()
1178 xe_guc_ct_print(&guc->ct, p, false); in xe_guc_print_info()
1179 xe_guc_submit_print(guc, p); in xe_guc_print_info()
1189 void xe_guc_declare_wedged(struct xe_guc *guc) in xe_guc_declare_wedged() argument
1191 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); in xe_guc_declare_wedged()
1193 xe_guc_reset_prepare(guc); in xe_guc_declare_wedged()
1194 xe_guc_ct_stop(&guc->ct); in xe_guc_declare_wedged()
1195 xe_guc_submit_wedge(guc); in xe_guc_declare_wedged()