Lines Matching "re-initialization"

1 // SPDX-License-Identifier: MIT
3 * Copyright © 2016-2019 Intel Corporation
28 struct drm_i915_private *i915 = uc_to_gt(uc)->i915; in uc_expand_default_options()
30 if (i915->params.enable_guc != -1) in uc_expand_default_options()
33 /* Don't enable GuC/HuC on pre-Gen12 */ in uc_expand_default_options()
35 i915->params.enable_guc = 0; in uc_expand_default_options()
41 i915->params.enable_guc = 0; in uc_expand_default_options()
47 i915->params.enable_guc = ENABLE_GUC_LOAD_HUC; in uc_expand_default_options()
52 i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION; in uc_expand_default_options()
63 ret = i915_inject_probe_error(gt->i915, -ENXIO); in __intel_uc_reset_hw()
73 guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); in __intel_uc_reset_hw()
84 struct drm_i915_private *i915 = gt->i915; in __confirm_options()
87 i915->params.enable_guc, in __confirm_options()
93 if (i915->params.enable_guc == 0) { in __confirm_options()
102 gt_info(gt, "Incompatible option enable_guc=%d - %s\n", in __confirm_options()
103 i915->params.enable_guc, "GuC is not supported!"); in __confirm_options()
105 if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION && in __confirm_options()
107 gt_info(gt, "Incompatible option enable_guc=%d - %s\n", in __confirm_options()
108 i915->params.enable_guc, "GuC submission is N/A"); in __confirm_options()
110 if (i915->params.enable_guc & ~ENABLE_GUC_MASK) in __confirm_options()
111 gt_info(gt, "Incompatible option enable_guc=%d - %s\n", in __confirm_options()
112 i915->params.enable_guc, "undocumented flag"); in __confirm_options()
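The matches above appear to come from the i915 uC management code (intel_uc.c under drivers/gpu/drm/i915/gt/uc/). They show how the enable_guc module parameter is handled: -1 means "pick a default", which uc_expand_default_options() expands to 0 (uC off), ENABLE_GUC_LOAD_HUC, or ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION depending on the platform, after which __confirm_options() warns about combinations the hardware or driver cannot honour. The standalone sketch below mirrors that validation logic; it is not driver code, and the bit assignments are my reading of intel_uc.h rather than something shown in this listing.

/*
 * Userspace sketch of the enable_guc bitmask checks; bit values assumed
 * from intel_uc.h, messages mirrored from the matched gt_info() calls.
 */
#include <stdio.h>

#define ENABLE_GUC_SUBMISSION  (1u << 0)   /* use GuC command submission */
#define ENABLE_GUC_LOAD_HUC    (1u << 1)   /* load and authenticate HuC */
#define ENABLE_GUC_MASK        (ENABLE_GUC_SUBMISSION | ENABLE_GUC_LOAD_HUC)

static void confirm_options(int enable_guc, int have_guc, int have_submission)
{
	if (enable_guc == 0)
		return;                 /* uC use disabled, nothing to check */

	if (!have_guc)
		printf("Incompatible option enable_guc=%d - GuC is not supported!\n",
		       enable_guc);

	if ((enable_guc & ENABLE_GUC_SUBMISSION) && !have_submission)
		printf("Incompatible option enable_guc=%d - GuC submission is N/A\n",
		       enable_guc);

	if (enable_guc & ~ENABLE_GUC_MASK)
		printf("Incompatible option enable_guc=%d - undocumented flag\n",
		       enable_guc);
}

int main(void)
{
	confirm_options(3, 1, 1);   /* HuC load + submission, both supported: silent */
	confirm_options(4, 1, 1);   /* bit outside the mask: "undocumented flag" */
	return 0;
}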
119 intel_guc_init_early(&uc->guc); in intel_uc_init_early()
120 intel_huc_init_early(&uc->huc); in intel_uc_init_early()
121 intel_gsc_uc_init_early(&uc->gsc); in intel_uc_init_early()
126 uc->ops = &uc_ops_on; in intel_uc_init_early()
128 uc->ops = &uc_ops_off; in intel_uc_init_early()
133 intel_guc_init_late(&uc->guc); in intel_uc_init_late()
134 intel_gsc_uc_load_start(&uc->gsc); in intel_uc_init_late()
142 * intel_uc_init_mmio - setup uC MMIO access
146 * initialization sequence.
150 intel_guc_init_send_regs(&uc->guc); in intel_uc_init_mmio()
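The kerneldoc fragment describes intel_uc_init_mmio() as the hook that sets up uC register access as part of the MMIO initialization sequence, and the single matched body line suggests it is a thin wrapper. A plausible reconstruction, where everything outside the matched lines is an assumption:

void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}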
155 struct intel_guc *guc = &uc->guc; in __uc_capture_load_err_log()
157 if (guc->log.vma && !uc->load_err_log) in __uc_capture_load_err_log()
158 uc->load_err_log = i915_gem_object_get(guc->log.vma->obj); in __uc_capture_load_err_log()
163 struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log); in __uc_free_load_err_log()
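The two helpers matched here manage a snapshot of the GuC log taken when firmware loading fails: the capture side pins the log object by taking an extra reference, and the free side clears the pointer with fetch_and_zero(). The put call in the sketch below is an assumption about the code following the matched line:

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}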
184 intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); in guc_clear_mmio_msg()
191 spin_lock_irq(&guc->irq_lock); in guc_get_mmio_msg()
193 val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15)); in guc_get_mmio_msg()
194 guc->mmio_msg |= val & guc->msg_enabled_mask; in guc_get_mmio_msg()
197 * clear all events, including the ones we're not currently servicing, in guc_get_mmio_msg()
203 spin_unlock_irq(&guc->irq_lock); in guc_get_mmio_msg()
209 GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct)); in guc_handle_mmio_msg()
211 spin_lock_irq(&guc->irq_lock); in guc_handle_mmio_msg()
212 if (guc->mmio_msg) { in guc_handle_mmio_msg()
213 intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); in guc_handle_mmio_msg()
214 guc->mmio_msg = 0; in guc_handle_mmio_msg()
216 spin_unlock_irq(&guc->irq_lock); in guc_handle_mmio_msg()
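The guc_*_mmio_msg() matches above outline a small protocol for events that GuC reports through SOFT_SCRATCH(15): latch the enabled bits under the irq lock, clear the register so nothing stale remains, and later hand the accumulated bits to the receive path once the CT channel is up. A sketch of the capture side; the guc_clear_mmio_msg() call is inferred from the SOFT_SCRATCH(15) write matched above rather than shown in this function's matches:

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	/* latch only the events we have enabled */
	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently
	 * servicing, so no stale bits are picked up on the next pass
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}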
222 struct drm_i915_private *i915 = gt->i915; in guc_enable_communication()
225 GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); in guc_enable_communication()
227 ret = i915_inject_probe_error(i915, -ENXIO); in guc_enable_communication()
231 ret = intel_guc_ct_enable(&guc->ct); in guc_enable_communication()
242 spin_lock_irq(gt->irq_lock); in guc_enable_communication()
243 intel_guc_ct_event_handler(&guc->ct); in guc_enable_communication()
244 spin_unlock_irq(gt->irq_lock); in guc_enable_communication()
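guc_enable_communication() brings up the CT channel and then runs the CT event handler once under the GT irq lock; my reading is that this drains any message that arrived before interrupt delivery was fully wired up. A trimmed sketch, with the interrupt-enable and MMIO-message handling steps between the matched lines omitted:

static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* drain anything that raced with enabling the channel */
	spin_lock_irq(gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(gt->irq_lock);

	return 0;
}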
262 intel_guc_ct_disable(&guc->ct); in guc_disable_communication()
282 err = intel_uc_fw_fetch(&uc->guc.fw); in __uc_fetch_firmwares()
287 intel_uc_fw_change_status(&uc->huc.fw, in __uc_fetch_firmwares()
293 intel_uc_fw_change_status(&uc->gsc.fw, in __uc_fetch_firmwares()
301 intel_uc_fw_fetch(&uc->huc.fw); in __uc_fetch_firmwares()
304 intel_uc_fw_fetch(&uc->gsc.fw); in __uc_fetch_firmwares()
309 intel_uc_fw_cleanup_fetch(&uc->gsc.fw); in __uc_cleanup_firmwares()
310 intel_uc_fw_cleanup_fetch(&uc->huc.fw); in __uc_cleanup_firmwares()
311 intel_uc_fw_cleanup_fetch(&uc->guc.fw); in __uc_cleanup_firmwares()
316 struct intel_guc *guc = &uc->guc; in __uc_init()
317 struct intel_huc *huc = &uc->huc; in __uc_init()
325 if (i915_inject_probe_failure(uc_to_gt(uc)->i915)) in __uc_init()
326 return -ENOMEM; in __uc_init()
336 intel_gsc_uc_init(&uc->gsc); in __uc_init()
343 intel_gsc_uc_fini(&uc->gsc); in __uc_fini()
344 intel_huc_fini(&uc->huc); in __uc_fini()
345 intel_guc_fini(&uc->guc); in __uc_fini()
350 struct intel_guc *guc = &uc->guc; in __uc_sanitize()
351 struct intel_huc *huc = &uc->huc; in __uc_sanitize()
365 struct intel_uncore *uncore = gt->uncore; in uc_init_wopcm()
366 u32 base = intel_wopcm_guc_base(&gt->wopcm); in uc_init_wopcm()
367 u32 size = intel_wopcm_guc_size(&gt->wopcm); in uc_init_wopcm()
374 return -E2BIG; in uc_init_wopcm()
383 err = i915_inject_probe_error(gt->i915, -ENXIO); in uc_init_wopcm()
418 struct intel_uncore *uncore = gt->uncore; in uc_is_wopcm_locked()
426 if (uc->fw_table_invalid) in __uc_check_hw()
427 return -EIO; in __uc_check_hw()
438 return -EIO; in __uc_check_hw()
446 intel_uc_fw_type_repr(fw->type), fw->file_selected.path, in print_fw_ver()
447 fw->file_selected.ver.major, in print_fw_ver()
448 fw->file_selected.ver.minor, in print_fw_ver()
449 fw->file_selected.ver.patch); in print_fw_ver()
455 struct drm_i915_private *i915 = gt->i915; in __uc_init_hw()
456 struct intel_guc *guc = &uc->guc; in __uc_init_hw()
457 struct intel_huc *huc = &uc->huc; in __uc_init_hw()
464 print_fw_ver(gt, &guc->fw); in __uc_init_hw()
467 print_fw_ver(gt, &huc->fw); in __uc_init_hw()
469 if (!intel_uc_fw_is_loadable(&guc->fw)) { in __uc_init_hw()
471 intel_uc_fw_is_overridden(&guc->fw) || in __uc_init_hw()
473 intel_uc_fw_status_to_error(guc->fw.status) : 0; in __uc_init_hw()
491 i915_hwmon_power_max_disable(gt->i915, &pl1en); in __uc_init_hw()
493 intel_rps_raise_unslice(&uc_to_gt(uc)->rps); in __uc_init_hw()
495 while (attempts--) { in __uc_init_hw()
497 * Always reset the GuC just before (re)loading, so in __uc_init_hw()
524 * GSC-loaded HuC is authenticated by the GSC, so we don't need to in __uc_init_hw()
541 ret = intel_guc_slpc_enable(&guc->slpc); in __uc_init_hw()
545 /* Restore GT back to RPn for non-SLPC path */ in __uc_init_hw()
546 intel_rps_lower_unslice(&uc_to_gt(uc)->rps); in __uc_init_hw()
549 i915_hwmon_power_max_restore(gt->i915, pl1en); in __uc_init_hw()
565 intel_rps_lower_unslice(&uc_to_gt(uc)->rps); in __uc_init_hw()
567 i915_hwmon_power_max_restore(gt->i915, pl1en); in __uc_init_hw()
577 gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret)); in __uc_init_hw()
580 return -EIO; in __uc_init_hw()
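The __uc_init_hw() matches outline the firmware bring-up: print the selected GuC/HuC versions, bail out early if the GuC blob is not loadable, then bracket a bounded retry loop with a PL1 power-limit override and a raised GT frequency so the load runs under predictable conditions, restoring both on the way out. A skeleton of that loop follows; the upload helper, retry count, and error labels are assumptions, only the sanitize step and the surrounding power/frequency bracket appear in the matches:

	i915_hwmon_power_max_disable(gt->i915, &pl1en);
	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so the
		 * firmware starts each attempt from a known state.
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_rps;

		ret = intel_guc_fw_upload(guc);	/* assumed upload step */
		if (ret == 0)
			break;
	}
	if (ret)
		goto err_rps;

	/* ... HuC authentication, submission/SLPC enable ... */

	/* Restore GT back to RPn for non-SLPC path */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	i915_hwmon_power_max_restore(gt->i915, pl1en);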
585 struct intel_guc *guc = &uc->guc; in __uc_fini_hw()
597 * intel_uc_reset_prepare - Prepare for reset
604 struct intel_guc *guc = &uc->guc; in intel_uc_reset_prepare()
606 uc->reset_in_progress = true; in intel_uc_reset_prepare()
625 struct intel_guc *guc = &uc->guc; in intel_uc_reset()
634 struct intel_guc *guc = &uc->guc; in intel_uc_reset_finish()
637 * NB: The wedge code path results in prepare -> prepare -> finish -> finish. in intel_uc_reset_finish()
638 * So this function is sometimes called with the in-progress flag not set. in intel_uc_reset_finish()
640 uc->reset_in_progress = false; in intel_uc_reset_finish()
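Taken together, the reset matches show a simple in-progress flag: intel_uc_reset_prepare() raises it before the GT reset and intel_uc_reset_finish() clears it afterwards, and because the wedge path calls prepare -> prepare -> finish -> finish, finish clears the flag unconditionally instead of asserting it was set. A minimal sketch of just that bookkeeping, with the submission quiesce/resume steps elided:

void intel_uc_reset_prepare(struct intel_uc *uc)
{
	uc->reset_in_progress = true;
	/* ... quiesce GuC submission before the engines are reset ... */
}

void intel_uc_reset_finish(struct intel_uc *uc)
{
	/* on the wedge path the flag may already have been cleared */
	uc->reset_in_progress = false;
	/* ... let GuC submission resume ... */
}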
649 struct intel_guc *guc = &uc->guc; in intel_uc_cancel_requests()
658 struct intel_guc *guc = &uc->guc; in intel_uc_runtime_suspend()
661 guc->interrupts.enabled = false; in intel_uc_runtime_suspend()
670 intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h, in intel_uc_runtime_suspend()
672 GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h)); in intel_uc_runtime_suspend()
679 struct intel_guc *guc = &uc->guc; in intel_uc_suspend()
684 intel_gsc_uc_flush_work(&uc->gsc); in intel_uc_suspend()
689 guc->interrupts.enabled = false; in intel_uc_suspend()
695 with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { in intel_uc_suspend()
704 intel_uc_fw_resume_mapping(&uc->guc.fw); in __uc_resume_mappings()
705 intel_uc_fw_resume_mapping(&uc->huc.fw); in __uc_resume_mappings()
710 struct intel_guc *guc = &uc->guc; in __uc_resume()
718 GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); in __uc_resume()
736 intel_gsc_uc_resume(&uc->gsc); in __uc_resume()
749 * When coming out of S3/S4 we sanitize and re-init the HW, so in intel_uc_resume()
750 * communication is already re-enabled at this point. in intel_uc_resume()
758 * During runtime resume we don't sanitize, so we need to re-init in intel_uc_runtime_resume()
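The GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(...)) match and the two resume comments suggest a single __uc_resume(uc, enable_communication) helper behind two thin wrappers: a full S3/S4 resume has already re-initialized communication during sanitize/re-init, while runtime resume has not. A reconstruction of the wrappers; their exact signatures are an assumption beyond what the listing shows:

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}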
766 .fini = __uc_fini, /* to clean-up the init_early initialization */