Lines matching full:gt (drivers/gpu/drm/i915/gt/intel_reset.c)

15 #include "gt/intel_gt_regs.h"
17 #include "gt/uc/intel_gsc_fw.h"
158 static int i915_do_reset(struct intel_gt *gt, in i915_do_reset() argument
162 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in i915_do_reset()
187 static int g33_do_reset(struct intel_gt *gt, in g33_do_reset() argument
191 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in g33_do_reset()
197 static int g4x_do_reset(struct intel_gt *gt, in g4x_do_reset() argument
201 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in g4x_do_reset()
202 struct intel_uncore *uncore = gt->uncore; in g4x_do_reset()
213 GT_TRACE(gt, "Wait for media reset failed\n"); in g4x_do_reset()
221 GT_TRACE(gt, "Wait for render reset failed\n"); in g4x_do_reset()
234 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, in ilk_do_reset() argument
237 struct intel_uncore *uncore = gt->uncore; in ilk_do_reset()
247 GT_TRACE(gt, "Wait for render reset failed\n"); in ilk_do_reset()
258 GT_TRACE(gt, "Wait for media reset failed\n"); in ilk_do_reset()
269 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) in gen6_hw_domain_reset() argument
271 struct intel_uncore *uncore = gt->uncore; in gen6_hw_domain_reset()
291 loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1; in gen6_hw_domain_reset()
294 * GEN6_GDRST is not in the gt power well, no need to check in gen6_hw_domain_reset()
308 GT_TRACE(gt, in gen6_hw_domain_reset()
321 static int __gen6_reset_engines(struct intel_gt *gt, in __gen6_reset_engines() argument
334 for_each_engine_masked(engine, gt, engine_mask, tmp) { in __gen6_reset_engines()
339 return gen6_hw_domain_reset(gt, hw_mask); in __gen6_reset_engines()
342 static int gen6_reset_engines(struct intel_gt *gt, in gen6_reset_engines() argument
349 spin_lock_irqsave(&gt->uncore->lock, flags); in gen6_reset_engines()
350 ret = __gen6_reset_engines(gt, engine_mask, retry); in gen6_reset_engines()
351 spin_unlock_irqrestore(&gt->uncore->lock, flags); in gen6_reset_engines()
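
Taken together, the gen6 fragments above form a two-layer pattern: gen6_reset_engines() takes the uncore spinlock, __gen6_reset_engines() translates the engine mask into GDRST domain bits, and gen6_hw_domain_reset() pokes GEN6_GDRST and polls until the hardware clears the acknowledged bits. A condensed sketch of that shape, assuming the per-engine reset_domain lookup and a 500us poll timeout (neither appears in the matched lines):

        /* Sketch only: condensed from the matched fragments, not verbatim driver code. */
        static int gen6_hw_domain_reset_sketch(struct intel_gt *gt, u32 hw_domain_mask)
        {
                struct intel_uncore *uncore = gt->uncore;
                /* Pre-12.70 parts repeat the reset once so that the second,
                 * no-op reset serialises with the first (see line 291). */
                int loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
                int err;

                do {
                        /* GEN6_GDRST is not in the gt power well, so raw _fw
                         * accesses are safe without forcewake. */
                        intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

                        /* Wait for the device to ack the reset request(s). */
                        err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
                                                           hw_domain_mask, 0,
                                                           500, 0, NULL);
                } while (err == 0 && --loops);

                return err;
        }

        static int __gen6_reset_engines_sketch(struct intel_gt *gt,
                                               intel_engine_mask_t engine_mask,
                                               unsigned int retry)
        {
                struct intel_engine_cs *engine;
                intel_engine_mask_t tmp;
                u32 hw_mask;

                if (engine_mask == ALL_ENGINES) {
                        hw_mask = GEN6_GRDOM_FULL;
                } else {
                        hw_mask = 0;
                        for_each_engine_masked(engine, gt, engine_mask, tmp)
                                hw_mask |= engine->reset_domain; /* assumed field */
                }

                return gen6_hw_domain_reset_sketch(gt, hw_mask);
        }
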
364 return engine->gt->engine[vecs_id]; in find_sfc_paired_vecs_engine()
416 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; in gen11_lock_sfc()
506 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; in gen11_unlock_sfc()
522 static int __gen11_reset_engines(struct intel_gt *gt, in __gen11_reset_engines() argument
535 for_each_engine_masked(engine, gt, engine_mask, tmp) { in __gen11_reset_engines()
543 ret = gen6_hw_domain_reset(gt, reset_mask); in __gen11_reset_engines()
557 for_each_engine_masked(engine, gt, unlock_mask, tmp) in __gen11_reset_engines()
596 gt_err(engine->gt, in gen8_engine_reset_prepare()
611 static int gen8_reset_engines(struct intel_gt *gt, in gen8_reset_engines() argument
621 spin_lock_irqsave(&gt->uncore->lock, flags); in gen8_reset_engines()
623 for_each_engine_masked(engine, gt, engine_mask, tmp) { in gen8_reset_engines()
649 if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES) in gen8_reset_engines()
650 __gen11_reset_engines(gt, gt->info.engine_mask, 0); in gen8_reset_engines()
652 if (GRAPHICS_VER(gt->i915) >= 11) in gen8_reset_engines()
653 ret = __gen11_reset_engines(gt, engine_mask, retry); in gen8_reset_engines()
655 ret = __gen6_reset_engines(gt, engine_mask, retry); in gen8_reset_engines()
658 for_each_engine_masked(engine, gt, engine_mask, tmp) in gen8_reset_engines()
661 spin_unlock_irqrestore(&gt->uncore->lock, flags); in gen8_reset_engines()
666 static int mock_reset(struct intel_gt *gt, in mock_reset() argument
677 static reset_func intel_get_gpu_reset(const struct intel_gt *gt) in intel_get_gpu_reset() argument
679 struct drm_i915_private *i915 = gt->i915; in intel_get_gpu_reset()
681 if (is_mock_gt(gt)) in intel_get_gpu_reset()
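
Only the mock short-circuit of intel_get_gpu_reset() matches "gt"; the rest of the function is a plain platform ladder over the reset callbacks listed above. A plausible reconstruction (the ordering of the IS_*() checks is an assumption, not taken from the matched lines):

        static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
        {
                struct drm_i915_private *i915 = gt->i915;

                if (is_mock_gt(gt))
                        return mock_reset;
                else if (GRAPHICS_VER(i915) >= 8)
                        return gen8_reset_engines;
                else if (GRAPHICS_VER(i915) >= 6)
                        return gen6_reset_engines;
                else if (GRAPHICS_VER(i915) >= 5)
                        return ilk_do_reset;
                else if (IS_G4X(i915))
                        return g4x_do_reset;
                else if (IS_G33(i915) || IS_PINEVIEW(i915))
                        return g33_do_reset;
                else if (GRAPHICS_VER(i915) >= 3)
                        return i915_do_reset;
                else
                        return NULL;
        }
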
699 static int __reset_guc(struct intel_gt *gt) in __reset_guc() argument
702 GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; in __reset_guc()
704 return gen6_hw_domain_reset(gt, guc_domain); in __reset_guc()
707 static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask) in needs_wa_14015076503() argument
709 if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0)) in needs_wa_14015076503()
715 return intel_gsc_uc_fw_init_done(&gt->uc.gsc); in needs_wa_14015076503()
719 wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first) in wa_14015076503_start() argument
721 if (!needs_wa_14015076503(gt, engine_mask)) in wa_14015076503_start()
738 if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) { in wa_14015076503_start()
739 __reset_guc(gt); in wa_14015076503_start()
740 engine_mask = gt->info.engine_mask & ~BIT(GSC0); in wa_14015076503_start()
742 intel_uncore_rmw(gt->uncore, in wa_14015076503_start()
747 intel_uncore_rmw(gt->uncore, in wa_14015076503_start()
757 wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask) in wa_14015076503_end() argument
759 if (!needs_wa_14015076503(gt, engine_mask)) in wa_14015076503_end()
762 intel_uncore_rmw(gt->uncore, in wa_14015076503_end()
767 static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) in __intel_gt_reset() argument
774 reset = intel_get_gpu_reset(gt); in __intel_gt_reset()
782 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in __intel_gt_reset()
786 reset_mask = wa_14015076503_start(gt, engine_mask, !retry); in __intel_gt_reset()
788 GT_TRACE(gt, "engine_mask=%x\n", reset_mask); in __intel_gt_reset()
789 ret = reset(gt, reset_mask, retry); in __intel_gt_reset()
791 wa_14015076503_end(gt, reset_mask); in __intel_gt_reset()
793 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in __intel_gt_reset()
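
The __intel_gt_reset() fragments show the top-level retry loop: hold forcewake across the whole attempt so a sleeping power well cannot drop the reset request, bracket each try with the Wa_14015076503 helpers, and invoke the platform hook returned by intel_get_gpu_reset(). Filled out, the shape is roughly as follows (the retry count and error codes are assumptions):

        static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
        {
                const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
                reset_func reset;
                int ret = -ETIMEDOUT;
                int retry;

                reset = intel_get_gpu_reset(gt);
                if (!reset)
                        return -ENODEV;

                /*
                 * If the power well sleeps during the reset, the reset
                 * request may be dropped and never complete.
                 */
                intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
                for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
                        intel_engine_mask_t reset_mask;

                        reset_mask = wa_14015076503_start(gt, engine_mask, !retry);

                        GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
                        ret = reset(gt, reset_mask, retry);

                        wa_14015076503_end(gt, reset_mask);
                }
                intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

                return ret;
        }
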
798 bool intel_has_gpu_reset(const struct intel_gt *gt) in intel_has_gpu_reset() argument
800 if (!gt->i915->params.reset) in intel_has_gpu_reset()
803 return intel_get_gpu_reset(gt); in intel_has_gpu_reset()
806 bool intel_has_reset_engine(const struct intel_gt *gt) in intel_has_reset_engine() argument
808 if (gt->i915->params.reset < 2) in intel_has_reset_engine()
811 return INTEL_INFO(gt->i915)->has_reset_engine; in intel_has_reset_engine()
814 int intel_reset_guc(struct intel_gt *gt) in intel_reset_guc() argument
818 GEM_BUG_ON(!HAS_GT_UC(gt->i915)); in intel_reset_guc()
820 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in intel_reset_guc()
821 ret = __reset_guc(gt); in intel_reset_guc()
822 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in intel_reset_guc()
845 static void revoke_mmaps(struct intel_gt *gt) in revoke_mmaps() argument
849 for (i = 0; i < gt->ggtt->num_fences; i++) { in revoke_mmaps()
854 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); in revoke_mmaps()
861 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]); in revoke_mmaps()
869 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, in revoke_mmaps()
876 static intel_engine_mask_t reset_prepare(struct intel_gt *gt) in reset_prepare() argument
891 if (intel_uc_uses_guc_submission(&gt->uc)) in reset_prepare()
892 intel_uc_reset_prepare(&gt->uc); in reset_prepare()
894 for_each_engine(engine, gt, id) { in reset_prepare()
903 static void gt_revoke(struct intel_gt *gt) in gt_revoke() argument
905 revoke_mmaps(gt); in gt_revoke()
908 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) in gt_reset() argument
918 err = i915_ggtt_enable_hw(gt->i915); in gt_reset()
923 for_each_engine(engine, gt, id) in gt_reset()
927 intel_uc_reset(&gt->uc, ALL_ENGINES); in gt_reset()
929 intel_ggtt_restore_fences(gt->ggtt); in gt_reset()
943 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) in reset_finish() argument
948 for_each_engine(engine, gt, id) { in reset_finish()
954 intel_uc_reset_finish(&gt->uc); in reset_finish()
970 static void __intel_gt_set_wedged(struct intel_gt *gt) in __intel_gt_set_wedged() argument
976 if (test_bit(I915_WEDGED, &gt->reset.flags)) in __intel_gt_set_wedged()
979 GT_TRACE(gt, "start\n"); in __intel_gt_set_wedged()
986 awake = reset_prepare(gt); in __intel_gt_set_wedged()
989 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in __intel_gt_set_wedged()
990 intel_gt_reset_all_engines(gt); in __intel_gt_set_wedged()
992 for_each_engine(engine, gt, id) in __intel_gt_set_wedged()
1001 set_bit(I915_WEDGED, &gt->reset.flags); in __intel_gt_set_wedged()
1005 for_each_engine(engine, gt, id) in __intel_gt_set_wedged()
1008 intel_uc_cancel_requests(&gt->uc); in __intel_gt_set_wedged()
1011 reset_finish(gt, awake); in __intel_gt_set_wedged()
1013 GT_TRACE(gt, "end\n"); in __intel_gt_set_wedged()
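
Read in sequence, the __intel_gt_set_wedged() fragments describe a fixed shutdown order: bail if already wedged, park submission, stop the engines with a reset where that is safe for the display, make new submissions complete immediately, and only then mark executing requests as cancelled. A condensed reconstruction (the nop-submit handoff and the RCU synchronisation between steps are paraphrased):

        static void __intel_gt_set_wedged_sketch(struct intel_gt *gt)
        {
                struct intel_engine_cs *engine;
                intel_engine_mask_t awake;
                enum intel_engine_id id;

                if (test_bit(I915_WEDGED, &gt->reset.flags))
                        return;

                /* Stop submission to hw, but do not complete requests yet. */
                awake = reset_prepare(gt);

                /* Even if a GPU reset fails, it should still stop the engines. */
                if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
                        intel_gt_reset_all_engines(gt);

                /* New requests now complete immediately with -EIO. */
                for_each_engine(engine, gt, id)
                        engine->submit_request = nop_submit_request; /* assumed hook */

                set_bit(I915_WEDGED, &gt->reset.flags);

                /* Mark all executing requests as skipped. */
                for_each_engine(engine, gt, id)
                        engine->reset.cancel(engine);
                intel_uc_cancel_requests(&gt->uc);

                reset_finish(gt, awake);
        }
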
1018 struct intel_gt *gt = container_of(w, struct intel_gt, wedge); in set_wedged_work() local
1021 with_intel_runtime_pm(gt->uncore->rpm, wf) in set_wedged_work()
1022 __intel_gt_set_wedged(gt); in set_wedged_work()
1025 void intel_gt_set_wedged(struct intel_gt *gt) in intel_gt_set_wedged() argument
1029 if (test_bit(I915_WEDGED, &gt->reset.flags)) in intel_gt_set_wedged()
1032 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_gt_set_wedged()
1033 mutex_lock(&gt->reset.mutex); in intel_gt_set_wedged()
1036 struct drm_printer p = drm_dbg_printer(&gt->i915->drm, in intel_gt_set_wedged()
1042 for_each_engine(engine, gt, id) { in intel_gt_set_wedged()
1050 __intel_gt_set_wedged(gt); in intel_gt_set_wedged()
1052 mutex_unlock(&gt->reset.mutex); in intel_gt_set_wedged()
1053 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in intel_gt_set_wedged()
1056 static bool __intel_gt_unset_wedged(struct intel_gt *gt) in __intel_gt_unset_wedged() argument
1058 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged()
1062 if (!test_bit(I915_WEDGED, &gt->reset.flags)) in __intel_gt_unset_wedged()
1066 if (intel_gt_has_unrecoverable_error(gt)) in __intel_gt_unset_wedged()
1069 GT_TRACE(gt, "start\n"); in __intel_gt_unset_wedged()
1108 ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ in __intel_gt_unset_wedged()
1109 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in __intel_gt_unset_wedged()
1110 ok = intel_gt_reset_all_engines(gt) == 0; in __intel_gt_unset_wedged()
1116 add_taint_for_CI(gt->i915, TAINT_WARN); in __intel_gt_unset_wedged()
1129 intel_engines_reset_default_submission(gt); in __intel_gt_unset_wedged()
1131 GT_TRACE(gt, "end\n"); in __intel_gt_unset_wedged()
1134 clear_bit(I915_WEDGED, &gt->reset.flags); in __intel_gt_unset_wedged()
1139 bool intel_gt_unset_wedged(struct intel_gt *gt) in intel_gt_unset_wedged() argument
1143 mutex_lock(&gt->reset.mutex); in intel_gt_unset_wedged()
1144 result = __intel_gt_unset_wedged(gt); in intel_gt_unset_wedged()
1145 mutex_unlock(&gt->reset.mutex); in intel_gt_unset_wedged()
1150 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) in do_reset() argument
1154 err = intel_gt_reset_all_engines(gt); in do_reset()
1157 err = intel_gt_reset_all_engines(gt); in do_reset()
1162 return gt_reset(gt, stalled_mask); in do_reset()
1165 static int resume(struct intel_gt *gt) in resume() argument
1171 for_each_engine(engine, gt, id) { in resume()
1182 * @gt: #intel_gt to reset
1197 void intel_gt_reset(struct intel_gt *gt, in intel_gt_reset() argument
1204 GT_TRACE(gt, "flags=%lx\n", gt->reset.flags); in intel_gt_reset()
1207 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in intel_gt_reset()
1213 gt_revoke(gt); in intel_gt_reset()
1215 mutex_lock(&gt->reset.mutex); in intel_gt_reset()
1218 if (!__intel_gt_unset_wedged(gt)) in intel_gt_reset()
1222 gt_notice(gt, "Resetting chip for %s\n", reason); in intel_gt_reset()
1223 atomic_inc(&gt->i915->gpu_error.reset_count); in intel_gt_reset()
1225 awake = reset_prepare(gt); in intel_gt_reset()
1227 if (!intel_has_gpu_reset(gt)) { in intel_gt_reset()
1228 if (gt->i915->params.reset) in intel_gt_reset()
1229 gt_err(gt, "GPU reset not supported\n"); in intel_gt_reset()
1231 gt_dbg(gt, "GPU reset disabled\n"); in intel_gt_reset()
1235 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in intel_gt_reset()
1236 intel_runtime_pm_disable_interrupts(gt->i915); in intel_gt_reset()
1238 if (do_reset(gt, stalled_mask)) { in intel_gt_reset()
1239 gt_err(gt, "Failed to reset chip\n"); in intel_gt_reset()
1243 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in intel_gt_reset()
1244 intel_runtime_pm_enable_interrupts(gt->i915); in intel_gt_reset()
1246 intel_overlay_reset(gt->i915); in intel_gt_reset()
1249 if (!intel_uc_uses_guc_submission(&gt->uc)) in intel_gt_reset()
1250 intel_uc_reset_prepare(&gt->uc); in intel_gt_reset()
1259 ret = intel_gt_init_hw(gt); in intel_gt_reset()
1261 gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret); in intel_gt_reset()
1265 ret = resume(gt); in intel_gt_reset()
1270 reset_finish(gt, awake); in intel_gt_reset()
1272 mutex_unlock(&gt->reset.mutex); in intel_gt_reset()
1288 add_taint_for_CI(gt->i915, TAINT_WARN); in intel_gt_reset()
1290 __intel_gt_set_wedged(gt); in intel_gt_reset()
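
intel_gt_reset() is the heavyweight, device-level path, and the matched lines give its complete skeleton in order. A condensed reconstruction, with the error handling simplified into a single taint path (label names and the exact unwinding are assumptions):

        void intel_gt_reset_sketch(struct intel_gt *gt,
                                   intel_engine_mask_t stalled_mask,
                                   const char *reason)
        {
                intel_engine_mask_t awake;
                int ret;

                /* The caller must already hold I915_RESET_BACKOFF (line 1207). */
                gt_revoke(gt);                          /* zap GGTT mmaps first */

                mutex_lock(&gt->reset.mutex);
                if (!__intel_gt_unset_wedged(gt))       /* terminally wedged */
                        goto unlock;

                if (reason)
                        gt_notice(gt, "Resetting chip for %s\n", reason);
                atomic_inc(&gt->i915->gpu_error.reset_count);

                awake = reset_prepare(gt);

                if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
                        intel_runtime_pm_disable_interrupts(gt->i915);

                if (do_reset(gt, stalled_mask)) {       /* reset + gt_reset() fixup */
                        gt_err(gt, "Failed to reset chip\n");
                        goto taint;
                }

                if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
                        intel_runtime_pm_enable_interrupts(gt->i915);

                intel_overlay_reset(gt->i915);

                ret = intel_gt_init_hw(gt);             /* rewrite lost HW state */
                if (ret) {
                        gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret);
                        goto taint;
                }

                ret = resume(gt);                       /* engine->resume() each */
                if (ret)
                        goto taint;

        finish:
                reset_finish(gt, awake);
        unlock:
                mutex_unlock(&gt->reset.mutex);
                return;

        taint:
                add_taint_for_CI(gt->i915, TAINT_WARN); /* make CI notice */
                __intel_gt_set_wedged(gt);
                goto finish;
        }
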
1295 * intel_gt_reset_all_engines() - Reset all engines in the given gt.
1296 * @gt: the GT to reset all engines for.
1298 * This function resets all engines within the given gt.
1303 int intel_gt_reset_all_engines(struct intel_gt *gt) in intel_gt_reset_all_engines() argument
1305 return __intel_gt_reset(gt, ALL_ENGINES); in intel_gt_reset_all_engines()
1309 * intel_gt_reset_engine() - Reset a specific engine within a gt.
1312 * This function resets the specified engine within a gt.
1319 return __intel_gt_reset(engine->gt, engine->mask); in intel_gt_reset_engine()
1324 struct intel_gt *gt = engine->gt; in __intel_engine_reset_bh() local
1327 ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); in __intel_engine_reset_bh()
1328 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); in __intel_engine_reset_bh()
1395 static void intel_gt_reset_global(struct intel_gt *gt, in intel_gt_reset_global() argument
1399 struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj; in intel_gt_reset_global()
1407 GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask); in intel_gt_reset_global()
1411 intel_wedge_on_timeout(&w, gt, 60 * HZ) { in intel_gt_reset_global()
1412 intel_display_reset_prepare(gt->i915); in intel_gt_reset_global()
1414 intel_gt_reset(gt, engine_mask, reason); in intel_gt_reset_global()
1416 intel_display_reset_finish(gt->i915); in intel_gt_reset_global()
1419 if (!test_bit(I915_WEDGED, &gt->reset.flags)) in intel_gt_reset_global()
1425 * @gt: the intel_gt
1436 void intel_gt_handle_error(struct intel_gt *gt, in intel_gt_handle_error() argument
1464 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_gt_handle_error()
1466 engine_mask &= gt->info.engine_mask; in intel_gt_handle_error()
1469 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE); in intel_gt_handle_error()
1470 intel_gt_clear_error_registers(gt, engine_mask); in intel_gt_handle_error()
1477 if (!intel_uc_uses_guc_submission(&gt->uc) && in intel_gt_handle_error()
1478 intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { in intel_gt_handle_error()
1480 for_each_engine_masked(engine, gt, engine_mask, tmp) { in intel_gt_handle_error()
1483 &gt->reset.flags)) in intel_gt_handle_error()
1490 &gt->reset.flags); in intel_gt_handle_error()
1499 if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { in intel_gt_handle_error()
1500 wait_event(gt->reset.queue, in intel_gt_handle_error()
1501 !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in intel_gt_handle_error()
1512 if (!intel_uc_uses_guc_submission(&gt->uc)) { in intel_gt_handle_error()
1513 for_each_engine(engine, gt, tmp) { in intel_gt_handle_error()
1515 &gt->reset.flags)) in intel_gt_handle_error()
1516 wait_on_bit(&gt->reset.flags, in intel_gt_handle_error()
1523 synchronize_srcu_expedited(&gt->reset.backoff_srcu); in intel_gt_handle_error()
1525 intel_gt_reset_global(gt, engine_mask, msg); in intel_gt_handle_error()
1527 if (!intel_uc_uses_guc_submission(&gt->uc)) { in intel_gt_handle_error()
1528 for_each_engine(engine, gt, tmp) in intel_gt_handle_error()
1530 &gt->reset.flags); in intel_gt_handle_error()
1532 clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags); in intel_gt_handle_error()
1534 wake_up_all(&gt->reset.queue); in intel_gt_handle_error()
1537 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in intel_gt_handle_error()
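
intel_gt_handle_error() is the funnel for everything above: it prefers per-engine resets (the I915_RESET_ENGINE bits) and escalates to the global I915_RESET_BACKOFF path only when engine reset is unavailable or an engine bit is already busy. A typical caller, modelled on the heartbeat/hangcheck style of use (the message text is illustrative):

        /* Flags may include I915_ERROR_CAPTURE to snapshot error state first. */
        intel_gt_handle_error(engine->gt, engine->mask, I915_ERROR_CAPTURE,
                              "stopped heartbeat on %s", engine->name);
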
1540 static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry) in _intel_gt_reset_lock() argument
1542 might_lock(&gt->reset.backoff_srcu); in _intel_gt_reset_lock()
1547 while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { in _intel_gt_reset_lock()
1553 if (wait_event_interruptible(gt->reset.queue, in _intel_gt_reset_lock()
1555 &gt->reset.flags))) in _intel_gt_reset_lock()
1560 *srcu = srcu_read_lock(&gt->reset.backoff_srcu); in _intel_gt_reset_lock()
1566 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) in intel_gt_reset_trylock() argument
1568 return _intel_gt_reset_lock(gt, srcu, false); in intel_gt_reset_trylock()
1571 int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu) in intel_gt_reset_lock_interruptible() argument
1573 return _intel_gt_reset_lock(gt, srcu, true); in intel_gt_reset_lock_interruptible()
1576 void intel_gt_reset_unlock(struct intel_gt *gt, int tag) in intel_gt_reset_unlock() argument
1577 __releases(&gt->reset.backoff_srcu) in intel_gt_reset_unlock()
1579 srcu_read_unlock(&gt->reset.backoff_srcu, tag); in intel_gt_reset_unlock()
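
The lock helpers above form an SRCU read-side guard around hardware state that a reset would invalidate: readers enter via intel_gt_reset_trylock() or intel_gt_reset_lock_interruptible(), and intel_gt_handle_error() flushes them with synchronize_srcu_expedited() (line 1523) before resetting. Usage follows the standard SRCU pattern, e.g. around a GGTT fault:

        int srcu, err;

        err = intel_gt_reset_trylock(gt, &srcu);
        if (err)
                return err;     /* -EBUSY while a reset is in flight; the
                                 * _interruptible variant sleeps and may
                                 * return -EINTR instead */

        /* ... touch hardware/GGTT state that must not race a reset ... */

        intel_gt_reset_unlock(gt, srcu);
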
1582 int intel_gt_terminally_wedged(struct intel_gt *gt) in intel_gt_terminally_wedged() argument
1586 if (!intel_gt_is_wedged(gt)) in intel_gt_terminally_wedged()
1589 if (intel_gt_has_unrecoverable_error(gt)) in intel_gt_terminally_wedged()
1593 if (wait_event_interruptible(gt->reset.queue, in intel_gt_terminally_wedged()
1595 &gt->reset.flags))) in intel_gt_terminally_wedged()
1598 return intel_gt_is_wedged(gt) ? -EIO : 0; in intel_gt_terminally_wedged()
1601 void intel_gt_set_wedged_on_init(struct intel_gt *gt) in intel_gt_set_wedged_on_init() argument
1605 intel_gt_set_wedged(gt); in intel_gt_set_wedged_on_init()
1606 i915_disable_error_state(gt->i915, -ENODEV); in intel_gt_set_wedged_on_init()
1607 set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags); in intel_gt_set_wedged_on_init()
1610 add_taint_for_CI(gt->i915, TAINT_WARN); in intel_gt_set_wedged_on_init()
1613 void intel_gt_set_wedged_on_fini(struct intel_gt *gt) in intel_gt_set_wedged_on_fini() argument
1615 intel_gt_set_wedged(gt); in intel_gt_set_wedged_on_fini()
1616 i915_disable_error_state(gt->i915, -ENODEV); in intel_gt_set_wedged_on_fini()
1617 set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags); in intel_gt_set_wedged_on_fini()
1618 intel_gt_retire_requests(gt); /* cleanup any wedged requests */ in intel_gt_set_wedged_on_fini()
1621 void intel_gt_init_reset(struct intel_gt *gt) in intel_gt_init_reset() argument
1623 init_waitqueue_head(&gt->reset.queue); in intel_gt_init_reset()
1624 mutex_init(&gt->reset.mutex); in intel_gt_init_reset()
1625 init_srcu_struct(&gt->reset.backoff_srcu); in intel_gt_init_reset()
1626 INIT_WORK(&gt->wedge, set_wedged_work); in intel_gt_init_reset()
1637 i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex); in intel_gt_init_reset()
1640 __set_bit(I915_WEDGED, &gt->reset.flags); in intel_gt_init_reset()
1643 void intel_gt_fini_reset(struct intel_gt *gt) in intel_gt_fini_reset() argument
1645 cleanup_srcu_struct(&gt->reset.backoff_srcu); in intel_gt_fini_reset()
1652 gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name); in intel_wedge_me()
1653 set_wedged_work(&w->gt->wedge); in intel_wedge_me()
1657 struct intel_gt *gt, in __intel_init_wedge() argument
1661 w->gt = gt; in __intel_init_wedge()
1665 queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout); in __intel_init_wedge()
1672 w->gt = NULL; in __intel_fini_wedge()
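
__intel_init_wedge() and __intel_fini_wedge() are rarely called directly; they pair up through a for-loop macro so the timeout is armed for exactly the body of the statement, as at line 1411 above. A sketch of the idiom (the macro body is reconstructed from memory of intel_reset.h; the loop condition relies on __intel_fini_wedge() clearing w->gt, line 1672):

        #define intel_wedge_on_timeout(W, GT, TIMEOUT)                          \
                for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__);        \
                     (W)->gt;                                                   \
                     __intel_fini_wedge((W)))

        /* Usage, as in intel_gt_reset_global(): wedge if display reset stalls. */
        struct intel_wedge_me w;

        intel_wedge_on_timeout(&w, gt, 60 * HZ) {
                intel_display_reset_prepare(gt->i915);
                intel_gt_reset(gt, engine_mask, reason);
                intel_display_reset_finish(gt->i915);
        }
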
1679 bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt) in intel_engine_reset_needs_wa_22011802037() argument
1681 if (GRAPHICS_VER(gt->i915) < 11) in intel_engine_reset_needs_wa_22011802037()
1684 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)) in intel_engine_reset_needs_wa_22011802037()
1687 if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) in intel_engine_reset_needs_wa_22011802037()