
Searched refs:guc (Results 1 – 25 of 92) sorted by relevance

/linux-6.12.1/drivers/gpu/drm/i915/gt/uc/
intel_guc.c
42 void intel_guc_notify(struct intel_guc *guc) in intel_guc_notify() argument
44 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_notify()
52 intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER); in intel_guc_notify()
55 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) in guc_send_reg() argument
57 GEM_BUG_ON(!guc->send_regs.base); in guc_send_reg()
58 GEM_BUG_ON(!guc->send_regs.count); in guc_send_reg()
59 GEM_BUG_ON(i >= guc->send_regs.count); in guc_send_reg()
61 return _MMIO(guc->send_regs.base + 4 * i); in guc_send_reg()
64 void intel_guc_init_send_regs(struct intel_guc *guc) in intel_guc_init_send_regs() argument
66 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_init_send_regs()
[all …]
intel_guc.h
97 void (*reset)(struct intel_guc *guc);
98 void (*enable)(struct intel_guc *guc);
99 void (*disable)(struct intel_guc *guc);
327 #define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version) argument
328 #define GUC_FIRMWARE_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.file_selected.ver) argument
336 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) in intel_guc_send() argument
338 return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0); in intel_guc_send()
342 inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len, in intel_guc_send_nb() argument
345 return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, in intel_guc_send_nb()
350 intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len, in intel_guc_send_and_receive() argument
[all …]
intel_guc_ads.c
84 static u32 guc_ads_regset_size(struct intel_guc *guc) in guc_ads_regset_size() argument
86 GEM_BUG_ON(!guc->ads_regset_size); in guc_ads_regset_size()
87 return guc->ads_regset_size; in guc_ads_regset_size()
90 static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc) in guc_ads_golden_ctxt_size() argument
92 return PAGE_ALIGN(guc->ads_golden_ctxt_size); in guc_ads_golden_ctxt_size()
95 static u32 guc_ads_waklv_size(struct intel_guc *guc) in guc_ads_waklv_size() argument
97 return PAGE_ALIGN(guc->ads_waklv_size); in guc_ads_waklv_size()
100 static u32 guc_ads_capture_size(struct intel_guc *guc) in guc_ads_capture_size() argument
102 return PAGE_ALIGN(guc->ads_capture_size); in guc_ads_capture_size()
105 static u32 guc_ads_private_data_size(struct intel_guc *guc) in guc_ads_private_data_size() argument
[all …]
intel_uc.c
119 intel_guc_init_early(&uc->guc); in intel_uc_init_early()
133 intel_guc_init_late(&uc->guc); in intel_uc_init_late()
150 intel_guc_init_send_regs(&uc->guc); in intel_uc_init_mmio()
155 struct intel_guc *guc = &uc->guc; in __uc_capture_load_err_log() local
157 if (guc->log.vma && !uc->load_err_log) in __uc_capture_load_err_log()
158 uc->load_err_log = i915_gem_object_get(guc->log.vma->obj); in __uc_capture_load_err_log()
182 static void guc_clear_mmio_msg(struct intel_guc *guc) in guc_clear_mmio_msg() argument
184 intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); in guc_clear_mmio_msg()
187 static void guc_get_mmio_msg(struct intel_guc *guc) in guc_get_mmio_msg() argument
191 spin_lock_irq(&guc->irq_lock); in guc_get_mmio_msg()
[all …]
intel_guc_debugfs.c
19 struct intel_guc *guc = m->private; in guc_info_show() local
22 if (!intel_guc_is_supported(guc)) in guc_info_show()
25 intel_guc_load_status(guc, &p); in guc_info_show()
27 intel_guc_log_info(&guc->log, &p); in guc_info_show()
29 if (!intel_guc_submission_is_used(guc)) in guc_info_show()
32 intel_guc_ct_print_info(&guc->ct, &p); in guc_info_show()
33 intel_guc_submission_print_info(guc, &p); in guc_info_show()
34 intel_guc_ads_print_policy_info(guc, &p); in guc_info_show()
42 struct intel_guc *guc = m->private; in guc_registered_contexts_show() local
45 if (!intel_guc_submission_is_used(guc)) in guc_registered_contexts_show()
[all …]
intel_guc_submission.c
161 #define NUMBER_MULTI_LRC_GUC_ID(guc) \ argument
162 ((guc)->submission_state.num_guc_ids / 16)
451 GEM_BUG_ON(!ce->parallel.guc.parent_page); in __get_parent_scratch_offset()
453 return ce->parallel.guc.parent_page * PAGE_SIZE; in __get_parent_scratch_offset()
504 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) in get_wq_pointer()
506 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head); in get_wq_pointer()
513 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; in get_wq_pointer()
516 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) in __get_context() argument
518 struct intel_context *ce = xa_load(&guc->context_lookup, id); in __get_context()
525 static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index) in __get_lrc_desc_v69() argument
[all …]
intel_guc_rc.c
13 static bool __guc_rc_supported(struct intel_guc *guc) in __guc_rc_supported() argument
16 return guc->submission_supported && in __guc_rc_supported()
17 GRAPHICS_VER(guc_to_i915(guc)) >= 12; in __guc_rc_supported()
20 static bool __guc_rc_selected(struct intel_guc *guc) in __guc_rc_selected() argument
22 if (!intel_guc_rc_is_supported(guc)) in __guc_rc_selected()
25 return guc->submission_selected; in __guc_rc_selected()
28 void intel_guc_rc_init_early(struct intel_guc *guc) in intel_guc_rc_init_early() argument
30 guc->rc_supported = __guc_rc_supported(guc); in intel_guc_rc_init_early()
31 guc->rc_selected = __guc_rc_selected(guc); in intel_guc_rc_init_early()
34 static int guc_action_control_gucrc(struct intel_guc *guc, bool enable) in guc_action_control_gucrc() argument
[all …]
intel_guc_capture.c
292 guc_capture_alloc_steered_lists(struct intel_guc *guc, in guc_capture_alloc_steered_lists() argument
295 struct intel_gt *gt = guc_to_gt(guc); in guc_capture_alloc_steered_lists()
307 if (!list || guc->capture->extlists) in guc_capture_alloc_steered_lists()
347 guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs); in guc_capture_alloc_steered_lists()
348 guc->capture->extlists = extlists; in guc_capture_alloc_steered_lists()
352 guc_capture_get_device_reglist(struct intel_guc *guc) in guc_capture_get_device_reglist() argument
354 struct drm_i915_private *i915 = guc_to_i915(guc); in guc_capture_get_device_reglist()
368 guc_capture_alloc_steered_lists(guc, lists); in guc_capture_get_device_reglist()
412 guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid, in guc_capture_list_init() argument
416 const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; in guc_capture_list_init()
[all …]
intel_guc_submission.h
16 void intel_guc_submission_init_early(struct intel_guc *guc);
17 int intel_guc_submission_init(struct intel_guc *guc);
18 int intel_guc_submission_enable(struct intel_guc *guc);
19 void intel_guc_submission_disable(struct intel_guc *guc);
20 void intel_guc_submission_fini(struct intel_guc *guc);
21 int intel_guc_preempt_work_create(struct intel_guc *guc);
22 void intel_guc_preempt_work_destroy(struct intel_guc *guc);
24 void intel_guc_submission_print_info(struct intel_guc *guc,
26 void intel_guc_submission_print_context_info(struct intel_guc *guc,
36 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
[all …]
intel_guc_rc.h
11 void intel_guc_rc_init_early(struct intel_guc *guc);
13 static inline bool intel_guc_rc_is_supported(struct intel_guc *guc) in intel_guc_rc_is_supported() argument
15 return guc->rc_supported; in intel_guc_rc_is_supported()
18 static inline bool intel_guc_rc_is_wanted(struct intel_guc *guc) in intel_guc_rc_is_wanted() argument
20 return guc->submission_selected && intel_guc_rc_is_supported(guc); in intel_guc_rc_is_wanted()
23 static inline bool intel_guc_rc_is_used(struct intel_guc *guc) in intel_guc_rc_is_used() argument
25 return intel_guc_submission_is_used(guc) && intel_guc_rc_is_wanted(guc); in intel_guc_rc_is_used()
28 int intel_guc_rc_enable(struct intel_guc *guc);
29 int intel_guc_rc_disable(struct intel_guc *guc);
intel_guc_fw.c
71 struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); in guc_xfer_rsa_vma() local
74 intel_guc_ggtt_offset(guc, guc_fw->rsa_data)); in guc_xfer_rsa_vma()
154 static int guc_wait_ucode(struct intel_guc *guc) in guc_wait_ucode() argument
156 struct intel_gt *gt = guc_to_gt(guc); in guc_wait_ucode()
195 guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n", in guc_wait_ucode()
207 guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n", in guc_wait_ucode()
209 …guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, … in guc_wait_ucode()
217 guc_info(guc, "invalid key requested, header = 0x%08X\n", in guc_wait_ucode()
223 guc_info(guc, "firmware signature verification failed\n"); in guc_wait_ucode()
228 guc_info(guc, "firmware production part check failure\n"); in guc_wait_ucode()
[all …]
intel_guc_log.c
42 struct intel_guc *guc = log_to_guc(log); in _guc_log_init_sizes() local
85 guc_err(guc, "Mis-aligned log %s size: 0x%X vs 0x%X!\n", in _guc_log_init_sizes()
90 guc_err(guc, "Zero log %s size!\n", sections[i].name); in _guc_log_init_sizes()
98 guc_err(guc, "log %s size too large: %d vs %d!\n", in _guc_log_init_sizes()
105 guc_err(guc, "Unit mismatch for crash and debug sections: %d vs %d!\n", in _guc_log_init_sizes()
182 static int guc_action_flush_log_complete(struct intel_guc *guc) in guc_action_flush_log_complete() argument
189 return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0); in guc_action_flush_log_complete()
192 static int guc_action_flush_log(struct intel_guc *guc) in guc_action_flush_log() argument
199 return intel_guc_send(guc, action, ARRAY_SIZE(action)); in guc_action_flush_log()
202 static int guc_action_control_log(struct intel_guc *guc, bool enable, in guc_action_control_log() argument
[all …]
intel_guc_slpc.c
33 static bool __detect_slpc_supported(struct intel_guc *guc) in __detect_slpc_supported() argument
36 return guc->submission_supported && in __detect_slpc_supported()
37 GRAPHICS_VER(guc_to_i915(guc)) >= 12; in __detect_slpc_supported()
40 static bool __guc_slpc_selected(struct intel_guc *guc) in __guc_slpc_selected() argument
42 if (!intel_guc_slpc_is_supported(guc)) in __guc_slpc_selected()
45 return guc->submission_selected; in __guc_slpc_selected()
50 struct intel_guc *guc = slpc_to_guc(slpc); in intel_guc_slpc_init_early() local
52 slpc->supported = __detect_slpc_supported(guc); in intel_guc_slpc_init_early()
53 slpc->selected = __guc_slpc_selected(guc); in intel_guc_slpc_init_early()
102 static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value) in guc_action_slpc_set_param_nb() argument
[all …]
selftest_guc.c
147 struct intel_guc *guc = gt_to_guc(gt); in intel_guc_steal_guc_ids() local
154 int number_guc_id_stolen = guc->number_guc_id_stolen; in intel_guc_steal_guc_ids()
158 guc_err(guc, "Context array allocation failed\n"); in intel_guc_steal_guc_ids()
164 sv = guc->submission_state.num_guc_ids; in intel_guc_steal_guc_ids()
165 guc->submission_state.num_guc_ids = 512; in intel_guc_steal_guc_ids()
171 guc_err(guc, "Failed to create context: %pe\n", ce[context_index]); in intel_guc_steal_guc_ids()
177 guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret)); in intel_guc_steal_guc_ids()
184 guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq); in intel_guc_steal_guc_ids()
189 guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret)); in intel_guc_steal_guc_ids()
198 guc_err(guc, "Failed to create context: %pe\n", ce[context_index]); in intel_guc_steal_guc_ids()
[all …]
intel_guc_hwconfig.c
34 static int __guc_action_get_hwconfig(struct intel_guc *guc, in __guc_action_get_hwconfig() argument
45 ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); in __guc_action_get_hwconfig()
52 static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig) in guc_hwconfig_discover_size() argument
60 ret = __guc_action_get_hwconfig(guc, 0, 0); in guc_hwconfig_discover_size()
71 static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig) in guc_hwconfig_fill_buffer() argument
80 ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr); in guc_hwconfig_fill_buffer()
84 ggtt_offset = intel_guc_ggtt_offset(guc, vma); in guc_hwconfig_fill_buffer()
86 ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size); in guc_hwconfig_fill_buffer()
114 struct intel_guc *guc = gt_to_guc(gt); in guc_hwconfig_init() local
120 ret = guc_hwconfig_discover_size(guc, hwconfig); in guc_hwconfig_init()
[all …]
intel_guc_ads.h
16 int intel_guc_ads_create(struct intel_guc *guc);
17 void intel_guc_ads_destroy(struct intel_guc *guc);
18 void intel_guc_ads_init_late(struct intel_guc *guc);
19 void intel_guc_ads_reset(struct intel_guc *guc);
20 void intel_guc_ads_print_policy_info(struct intel_guc *guc,
23 u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
intel_guc_slpc.h
17 static inline bool intel_guc_slpc_is_supported(struct intel_guc *guc) in intel_guc_slpc_is_supported() argument
19 return guc->slpc.supported; in intel_guc_slpc_is_supported()
22 static inline bool intel_guc_slpc_is_wanted(struct intel_guc *guc) in intel_guc_slpc_is_wanted() argument
24 return guc->slpc.selected; in intel_guc_slpc_is_wanted()
27 static inline bool intel_guc_slpc_is_used(struct intel_guc *guc) in intel_guc_slpc_is_used() argument
29 return intel_guc_submission_is_used(guc) && intel_guc_slpc_is_wanted(guc); in intel_guc_slpc_is_used()
/linux-6.12.1/drivers/gpu/drm/xe/
xe_guc.c
41 static u32 guc_bo_ggtt_addr(struct xe_guc *guc, in guc_bo_ggtt_addr() argument
44 struct xe_device *xe = guc_to_xe(guc); in guc_bo_ggtt_addr()
48 xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc))); in guc_bo_ggtt_addr()
55 static u32 guc_ctl_debug_flags(struct xe_guc *guc) in guc_ctl_debug_flags() argument
57 u32 level = xe_guc_log_get_level(&guc->log); in guc_ctl_debug_flags()
69 static u32 guc_ctl_feature_flags(struct xe_guc *guc) in guc_ctl_feature_flags() argument
73 if (!guc_to_xe(guc)->info.skip_guc_pc) in guc_ctl_feature_flags()
79 static u32 guc_ctl_log_params_flags(struct xe_guc *guc) in guc_ctl_log_params_flags() argument
81 u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
132 static u32 guc_ctl_ads_flags(struct xe_guc *guc) in guc_ctl_ads_flags() argument
[all …]
xe_guc.h
21 #define GUC_SUBMIT_VER(guc) \ argument
22 MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY])
23 #define GUC_FIRMWARE_VER(guc) \ argument
24 MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE])
28 void xe_guc_comm_init_early(struct xe_guc *guc);
29 int xe_guc_init(struct xe_guc *guc);
30 int xe_guc_init_post_hwconfig(struct xe_guc *guc);
31 int xe_guc_post_load_init(struct xe_guc *guc);
32 int xe_guc_reset(struct xe_guc *guc);
33 int xe_guc_upload(struct xe_guc *guc);
[all …]
xe_guc_submit.c
49 return &q->gt->uc.guc; in exec_queue_to_guc()
72 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
77 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
82 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
87 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
92 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
97 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
102 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
107 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
112 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
[all …]
xe_guc_hwconfig.c
18 static int send_get_hwconfig(struct xe_guc *guc, u64 ggtt_addr, u32 size) in send_get_hwconfig() argument
27 return xe_guc_mmio_send(guc, action, ARRAY_SIZE(action)); in send_get_hwconfig()
30 static int guc_hwconfig_size(struct xe_guc *guc, u32 *size) in guc_hwconfig_size() argument
32 int ret = send_get_hwconfig(guc, 0, 0); in guc_hwconfig_size()
41 static int guc_hwconfig_copy(struct xe_guc *guc) in guc_hwconfig_copy() argument
43 int ret = send_get_hwconfig(guc, xe_bo_ggtt_addr(guc->hwconfig.bo), in guc_hwconfig_copy()
44 guc->hwconfig.size); in guc_hwconfig_copy()
52 int xe_guc_hwconfig_init(struct xe_guc *guc) in xe_guc_hwconfig_init() argument
54 struct xe_device *xe = guc_to_xe(guc); in xe_guc_hwconfig_init()
55 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_hwconfig_init()
[all …]
xe_guc_submit.h
15 int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
17 int xe_guc_submit_reset_prepare(struct xe_guc *guc);
18 void xe_guc_submit_reset_wait(struct xe_guc *guc);
19 void xe_guc_submit_stop(struct xe_guc *guc);
20 int xe_guc_submit_start(struct xe_guc *guc);
21 void xe_guc_submit_wedge(struct xe_guc *guc);
23 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
24 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
25 int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
26 int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
[all …]
xe_uc.c
43 ret = xe_guc_init(&uc->guc); in xe_uc_init()
90 err = xe_guc_init_post_hwconfig(&uc->guc); in xe_uc_init_post_hwconfig()
106 ret = xe_guc_reset(&uc->guc); in uc_reset()
118 xe_guc_sanitize(&uc->guc); in xe_uc_sanitize()
142 ret = xe_guc_min_load_for_hwconfig(&uc->guc); in xe_uc_init_hwconfig()
157 err = xe_guc_enable_communication(&uc->guc); in vf_uc_init_hw()
165 uc->guc.submission_state.enabled = true; in vf_uc_init_hw()
193 ret = xe_guc_upload(&uc->guc); in xe_uc_init_hw()
197 ret = xe_guc_enable_communication(&uc->guc); in xe_uc_init_hw()
205 ret = xe_guc_post_load_init(&uc->guc); in xe_uc_init_hw()
[all …]
xe_guc_debugfs.c
26 struct xe_guc *guc = node_to_guc(m->private); in guc_info() local
27 struct xe_device *xe = guc_to_xe(guc); in guc_info()
31 xe_guc_print_info(guc, &p); in guc_info()
39 struct xe_guc *guc = node_to_guc(m->private); in guc_log() local
40 struct xe_device *xe = guc_to_xe(guc); in guc_log()
44 xe_guc_log_print(&guc->log, &p); in guc_log()
55 void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent) in xe_guc_debugfs_register() argument
57 struct drm_minor *minor = guc_to_xe(guc)->drm.primary; in xe_guc_debugfs_register()
62 local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL); in xe_guc_debugfs_register()
70 local[i].data = guc; in xe_guc_debugfs_register()
xe_gt_sriov_pf_policy.c
21 static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size) in guc_action_update_vgt_policy() argument
30 return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request)); in guc_action_update_vgt_policy()
42 struct xe_guc *guc = &gt->uc.guc; in pf_send_policy_klvs() local
56 ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords); in pf_send_policy_klvs()
144 &gt->sriov.pf.policy.guc.sched_if_idle, in pf_provision_sched_if_idle()
153 return pf_provision_sched_if_idle(gt, gt->sriov.pf.policy.guc.sched_if_idle); in pf_reprovision_sched_if_idle()
161 gt->sriov.pf.policy.guc.sched_if_idle = false; in pf_sanitize_sched_if_idle()
199 enable = gt->sriov.pf.policy.guc.sched_if_idle; in xe_gt_sriov_pf_policy_get_sched_if_idle()
211 &gt->sriov.pf.policy.guc.reset_engine, enable); in pf_provision_reset_engine()
219 return pf_provision_reset_engine(gt, gt->sriov.pf.policy.guc.reset_engine); in pf_reprovision_reset_engine()
[all …]