/linux-6.12.1/drivers/gpu/drm/xe/ (matches for the gt_to_xe() identifier)
D | xe_gt_debugfs.c
      89: struct xe_device *xe = gt_to_xe(gt);  in hw_engines()
     114: xe_pm_runtime_get(gt_to_xe(gt));  in force_reset()
     116: xe_pm_runtime_put(gt_to_xe(gt));  in force_reset()
     123: xe_pm_runtime_get(gt_to_xe(gt));  in force_reset_sync()
     125: xe_pm_runtime_put(gt_to_xe(gt));  in force_reset_sync()
     136: xe_pm_runtime_get(gt_to_xe(gt));  in sa_info()
     139: xe_pm_runtime_put(gt_to_xe(gt));  in sa_info()
     146: xe_pm_runtime_get(gt_to_xe(gt));  in topology()
     148: xe_pm_runtime_put(gt_to_xe(gt));  in topology()
     155: xe_pm_runtime_get(gt_to_xe(gt));  in steering()
     [all …]
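Every hit in this file is the same runtime-PM bracket: a debugfs printer resolves the owning xe_device with gt_to_xe() and holds a runtime-PM reference across its register reads. A minimal sketch of that pattern, assuming a drm_printer-based handler and a hypothetical print_gt_topology() dump helper:

	static int topology(struct xe_gt *gt, struct drm_printer *p)
	{
		xe_pm_runtime_get(gt_to_xe(gt));	/* wake the device before touching HW */
		print_gt_topology(gt, p);		/* hypothetical dump helper */
		xe_pm_runtime_put(gt_to_xe(gt));	/* allow runtime suspend again */

		return 0;
	}

The get/put pair is on the device, not the GT: runtime PM is tracked per struct xe_device, which is exactly why every handler calls gt_to_xe() first.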
D | xe_gt_sriov_pf_policy.c
     140: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_provision_sched_if_idle()
     150: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_reprovision_sched_if_idle()
     158: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_sanitize_sched_if_idle()
     196: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in xe_gt_sriov_pf_policy_get_sched_if_idle()
     207: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_provision_reset_engine()
     216: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_reprovision_reset_engine()
     224: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_sanitize_reset_engine()
     262: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in xe_gt_sriov_pf_policy_get_reset_engine()
     273: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_provision_sample_period()
     282: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_reprovision_sample_period()
     [all …]
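Every provisioning and sanitize helper in this file opens with the same guard: assert that the device behind the GT is the SR-IOV physical function before touching PF-only policy state. A hedged sketch (everything after the assert is assumed):

	static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
	{
		/* Policy provisioning is PF-only; VFs must never reach this path. */
		xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

		/* ... push the updated scheduling policy to the GuC (assumed) ... */
		return 0;
	}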
D | xe_gt_idle.c
      53: return gt_to_xe(gt);  in pc_to_xe()
     100: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_idle_enable_pg()
     111: xe_device_assert_mem_access(gt_to_xe(gt));  in xe_gt_idle_enable_pg()
     137: if (IS_SRIOV_VF(gt_to_xe(gt)))  in xe_gt_idle_disable_pg()
     140: xe_device_assert_mem_access(gt_to_xe(gt));  in xe_gt_idle_disable_pg()
     207: if (gt_to_xe(gt)->info.skip_guc_pc) {  in gt_idle_fini()
     220: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_idle_init()
     256: xe_device_assert_mem_access(gt_to_xe(gt));  in xe_gt_idle_enable_c6()
     259: if (IS_SRIOV_VF(gt_to_xe(gt)))  in xe_gt_idle_enable_c6()
     271: xe_device_assert_mem_access(gt_to_xe(gt));  in xe_gt_idle_disable_c6()
     [all …]
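Two checks recur through the idle code: a VF early-return, because power-gating and C6 setup belong to the PF, and an assertion that the device holds a memory-access wakeref before MMIO. A sketch combining the hits at lines 137 and 140 under those assumptions:

	void xe_gt_idle_disable_pg(struct xe_gt *gt)
	{
		if (IS_SRIOV_VF(gt_to_xe(gt)))	/* PF owns power-gating config */
			return;

		xe_device_assert_mem_access(gt_to_xe(gt));	/* device must be awake */

		/* ... clear the power-gate enable registers (details assumed) ... */
	}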
D | xe_gt_sriov_vf.c
      95: struct xe_device *xe = gt_to_xe(gt);  in vf_minimum_guc_version()
     127: xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));  in vf_handshake_with_guc()
     312: xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));  in xe_gt_sriov_vf_gmdid()
     313: xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));  in xe_gt_sriov_vf_gmdid()
     334: xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));  in vf_get_ggtt_info()
     367: xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));  in vf_get_lmem_info()
     394: xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));  in vf_get_submission_cfg()
     425: xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));  in vf_cache_gmdid()
     426: xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));  in vf_cache_gmdid()
     441: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_sriov_vf_query_config()
     [all …]
D | xe_gt.c
      82: err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);  in xe_gt_alloc()
     284: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_record_default_lrcs()
     370: if (IS_SRIOV_PF(gt_to_xe(gt))) {  in xe_gt_init_early()
     376: xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));  in xe_gt_init_early()
     398: p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);  in dump_pat_on_error()
     415: if (IS_SRIOV_PF(gt_to_xe(gt)))  in gt_fw_domain_init()
     491: if (gt_to_xe(gt)->info.has_usm) {  in all_fw_domain_init()
     492: struct xe_device *xe = gt_to_xe(gt);  in all_fw_domain_init()
     523: if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))  in all_fw_domain_init()
     526: if (IS_SRIOV_PF(gt_to_xe(gt)))  in all_fw_domain_init()
     [all …]
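The hit at line 82 shows how GT teardown is tied to the DRM device's lifetime: drmm_add_action_or_reset() registers gt_fini() as a managed cleanup action, and runs it immediately if registration fails. A sketch of xe_gt_alloc() around that call, with the allocation details assumed:

	struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
	{
		struct xe_gt *gt;
		int err;

		/* Managed allocation; freed with the DRM device (assumed shape). */
		gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt)
			return ERR_PTR(-ENOMEM);
		gt->tile = tile;

		/* Run gt_fini(gt) automatically when the DRM device is released. */
		err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
		if (err)
			return ERR_PTR(err);

		return gt;
	}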
D | xe_gt_mcr.c
     242: if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {  in init_steering_l3bank()
     255: } else if (gt_to_xe(gt)->info.platform == XE_DG2) {  in init_steering_l3bank()
     332: if (gt_to_xe(gt)->info.platform == XE_PVC)  in dss_per_group()
     334: else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)  in dss_per_group()
     434: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_mcr_init()
     488: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_mcr_set_implicit_defaults()
     554: drm_WARN(&gt_to_xe(gt)->drm, true,  in xe_gt_mcr_get_nonterminated_steering()
     570: struct xe_device *xe = gt_to_xe(gt);  in mcr_lock()
     591: if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)  in mcr_unlock()
     611: if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {  in rw_with_mcr_steering()
     [all …]
D | xe_gt_sriov_pf_helpers.h
      23: #define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
      27: return xe_sriov_pf_get_totalvfs(gt_to_xe(gt));  in xe_gt_sriov_pf_get_totalvfs()
      32: return xe_sriov_pf_master_mutex(gt_to_xe(gt));  in xe_gt_sriov_pf_master_mutex()
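This header is nothing but thin wrappers: they let GT-level SR-IOV code reach device-level PF state (total VF count, the PF master mutex, VF-id assertions) without spelling out gt_to_xe() at every call site. A hypothetical caller:

	static void pf_update_vf_state(struct xe_gt *gt, unsigned int vfid)
	{
		xe_gt_sriov_pf_assert_vfid(gt, vfid);	/* sanity-check the VF number */

		mutex_lock(xe_gt_sriov_pf_master_mutex(gt));	/* device-wide PF lock */
		/* ... update per-VF provisioning data (assumed) ... */
		mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	}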
D | xe_gt_tlb_invalidation.c
      45: xe_pm_runtime_put(gt_to_xe(fence->gt));  in xe_gt_tlb_invalidation_fence_fini()
      72: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_tlb_fence_timeout()
     155: invalidation_fence_signal(gt_to_xe(gt), fence);  in xe_gt_tlb_invalidation_reset()
     178: struct xe_device *xe = gt_to_xe(gt);  in send_tlb_invalidation()
     272: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_tlb_invalidation_ggtt()
     326: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_tlb_invalidation_range()
     334: if (gt_to_xe(gt)->info.force_execlist) {  in xe_gt_tlb_invalidation_range()
     432: struct xe_device *xe = gt_to_xe(gt);  in xe_guc_tlb_invalidation_done_handler()
     515: xe_pm_runtime_get_noresume(gt_to_xe(gt));  in xe_gt_tlb_invalidation_fence_init()
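The first and last hits are a matched pair: fence init pins the device with a no-resume runtime-PM reference (the device is necessarily awake when an invalidation is issued) and fence fini drops it through the fence's cached GT pointer. Sketched usage; the third init argument is assumed to flag a stack-allocated fence:

	struct xe_gt_tlb_invalidation_fence fence;

	/* Init takes the reference: xe_pm_runtime_get_noresume(gt_to_xe(gt)). */
	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

	/* ... issue the invalidation and wait for the GuC done handler ... */

	/* Fini releases it: xe_pm_runtime_put(gt_to_xe(fence.gt)). */
	xe_gt_tlb_invalidation_fence_fini(&fence);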
D | xe_gt_topology.c
      23: if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))  in load_dss_mask()
      37: struct xe_device *xe = gt_to_xe(gt);  in load_eu_mask()
     129: struct xe_device *xe = gt_to_xe(gt);  in load_l3_bank_mask()
     197: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_topology_init()
     222: p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");  in xe_gt_topology_init()
     286: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_topology_has_dss_in_quadrant()
D | xe_gt_sriov_pf_monitor.c
      28: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in xe_gt_sriov_pf_monitor_flr()
      38: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_update_event_counter()
      80: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_sriov_pf_monitor_process_guc2pf()
     124: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in xe_gt_sriov_pf_monitor_print_events()
D | xe_force_wake.c
      37: struct xe_device *xe = gt_to_xe(gt);  in xe_force_wake_init_gt()
      43: xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);  in xe_force_wake_init_gt()
      63: xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);  in xe_force_wake_init_engines()
     100: if (IS_SRIOV_VF(gt_to_xe(gt)))  in __domain_ctl()
     111: if (IS_SRIOV_VF(gt_to_xe(gt)))  in __domain_wait()
D | xe_execlist.c
      47: struct xe_device *xe = gt_to_xe(gt);  in __start_lrc()
      96: struct xe_device *xe = gt_to_xe(port->hwe->gt);  in __xe_execlist_port_start()
     298: spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);  in xe_execlist_port_destroy()
     300: spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);  in xe_execlist_port_destroy()
     335: struct xe_device *xe = gt_to_xe(q->gt);  in execlist_exec_queue_init()
     352: gt_to_xe(q->gt)->drm.dev);  in execlist_exec_queue_init()
     384: struct xe_device *xe = gt_to_xe(q->gt);  in execlist_exec_queue_fini_async()
     472: if (xe_device_uc_enabled(gt_to_xe(gt)))  in xe_execlist_init()
D | xe_gt_ccs_mode.c
      21: struct xe_device *xe = gt_to_xe(gt);  in __xe_gt_apply_ccs_mode()
      85: if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))  in xe_gt_apply_ccs_mode()
     116: struct xe_device *xe = gt_to_xe(gt);  in ccs_mode_store()
     191: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_ccs_mode_sysfs_init()
D | xe_pat.c
     179: struct xe_device *xe = gt_to_xe(gt);  in xelp_dump()
     208: struct xe_device *xe = gt_to_xe(gt);  in xehp_dump()
     239: struct xe_device *xe = gt_to_xe(gt);  in xehpc_dump()
     268: struct xe_device *xe = gt_to_xe(gt);  in xelpg_dump()
     311: if (IS_DGFX(gt_to_xe(gt)))  in xe2lpg_program_pat()
     321: if (IS_DGFX(gt_to_xe(gt)))  in xe2lpm_program_pat()
     327: struct xe_device *xe = gt_to_xe(gt);  in xe2_dump()
     456: struct xe_device *xe = gt_to_xe(gt);  in xe_pat_init()
     469: struct xe_device *xe = gt_to_xe(gt);  in xe_pat_dump()
D | xe_gt_sriov_pf_config.c
     224: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_pick_vf_config()
     225: xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));  in pf_pick_vf_config()
     328: struct xe_device *xe = gt_to_xe(gt);  in pf_get_ggtt_alignment()
     345: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_get_spare_ggtt()
     357: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_set_spare_ggtt()
     412: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_provision_vf_ggtt()
     680: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_get_spare_ctxs()
     691: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_set_spare_ctxs()
     978: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_get_spare_dbs()
     989: xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));  in pf_set_spare_dbs()
     [all …]
D | xe_gt_printk.h
      14: drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
      38: drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
      41: drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
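The hit at line 14 is the core print helper: it resolves the drm_device through gt_to_xe() and prefixes every message with the GT id, so per-level wrappers need no per-GT state. Assuming the usual xe_gt_err()-style wrapper built on it, a call like:

	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

expands to roughly:

	drm_err(&gt_to_xe(gt)->drm, "GT%u: reset failed (%pe)\n",
		gt->info.id, ERR_PTR(err));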
D | xe_gt_sriov_pf_debugfs.c
     114: struct xe_device *xe = gt_to_xe(gt); \
     176: struct xe_device *xe = gt_to_xe(gt); \
     230: struct xe_device *xe = gt_to_xe(gt);  in set_threshold()
     280: if (IS_DGFX(gt_to_xe(gt)))  in MAKE_XE_GUC_KLV_THRESHOLDS_SET()
     322: struct xe_device *xe = gt_to_xe(gt);  in control_write()
     387: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_sriov_pf_debugfs_register()
D | xe_gt_pagefault.c
     205: struct xe_device *xe = gt_to_xe(gt);  in handle_pagefault()
     324: struct xe_device *xe = gt_to_xe(gt);  in xe_guc_pagefault_handler()
     362: struct xe_device *xe = gt_to_xe(gt);  in pf_queue_work_func()
     404: struct xe_device *xe = gt_to_xe(gt);  in pagefault_fini()
     415: struct xe_device *xe = gt_to_xe(gt);  in xe_alloc_pf_queue()
     444: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_pagefault_init()
     479: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_pagefault_reset()
     548: struct xe_device *xe = gt_to_xe(gt);  in handle_acc()
     633: struct xe_device *xe = gt_to_xe(gt);  in acc_queue_work_func()
     689: drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");  in xe_guc_access_counter_notify_handler()
D | xe_hw_engine.c
     350: if (GRAPHICS_VER(gt_to_xe(gt)) < 20)  in xe_rtp_cfeg_wmtp_disabled()
     408: struct xe_device *xe = gt_to_xe(gt);  in hw_engine_setup_default_state()
     529: xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));  in hw_engine_init_early()
     534: xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));  in hw_engine_init_early()
     541: struct xe_device *xe = gt_to_xe(gt);  in hw_engine_init()
     607: struct xe_device *xe = gt_to_xe(gt);  in read_media_fuses()
     652: struct xe_device *xe = gt_to_xe(gt);  in read_copy_fuses()
     677: struct xe_device *xe = gt_to_xe(gt);  in read_compute_fuses_from_dss()
     704: struct xe_device *xe = gt_to_xe(gt);  in read_compute_fuses_from_reg()
     723: if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)  in read_compute_fuses()
     [all …]
D | xe_gt_sriov_pf.c
      33: gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,  in pf_alloc_metadata()
      86: if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))  in xe_gt_sriov_pf_init_hw()
D | xe_gsc.c
      47: struct xe_device *xe = gt_to_xe(gt);  in memcpy_fw()
     129: struct xe_device *xe = gt_to_xe(gt);  in query_compatibility_version()
     202: struct xe_device *xe = gt_to_xe(gt);  in gsc_upload()
     342: xe_device_declare_wedged(gt_to_xe(gt));  in gsc_er_complete()
     353: struct xe_device *xe = gt_to_xe(gt);  in gsc_work()
     463: struct xe_device *xe = gt_to_xe(gt);  in xe_gsc_init_post_hwconfig()
     520: struct xe_device *xe = gt_to_xe(gt);  in xe_gsc_load_start()
D | xe_gt.h
      81: xe_device_uc_enabled(gt_to_xe(gt));  in xe_gt_has_indirect_ring_state()
      91: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_is_usm_hwe()
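Both hits are small inline predicates in the GT header. A hedged reconstruction of xe_gt_is_usm_hwe(); the has_usm field appears in other hits in this listing, but the exact conditions here are assumptions:

	static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
	{
		struct xe_device *xe = gt_to_xe(gt);

		/* USM page faults are serviced on a reserved copy engine (assumed). */
		return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		       hwe->instance == gt->usm.reserved_bcs_instance;
	}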
D | xe_huc.c
      37: return gt_to_xe(huc_to_gt(huc));  in huc_to_xe()
      50: struct xe_device *xe = gt_to_xe(gt);  in huc_alloc_gsc_pkt()
      70: struct xe_device *xe = gt_to_xe(gt);  in xe_huc_init()
     156: struct xe_device *xe = gt_to_xe(gt);  in huc_auth_via_gsccs()
D | xe_gt_throttle.c
      42: xe_pm_runtime_get(gt_to_xe(gt));  in xe_gt_throttle_get_limit_reasons()
      47: xe_pm_runtime_put(gt_to_xe(gt));  in xe_gt_throttle_get_limit_reasons()
     241: struct xe_device *xe = gt_to_xe(gt);  in xe_gt_throttle_init()
D | xe_mocs.c
     262: struct xe_device *xe = gt_to_xe(gt);  in regs_are_mcr()
     741: get_mocs_settings(gt_to_xe(gt), &table);  in xe_mocs_init_early()
     751: if (IS_SRIOV_VF(gt_to_xe(gt)))  in xe_mocs_init()
     762: flags = get_mocs_settings(gt_to_xe(gt), &table);  in xe_mocs_init()
     765: if (IS_SRIOV_VF(gt_to_xe(gt)))  in xe_mocs_init()
     779: struct xe_device *xe = gt_to_xe(gt);  in xe_mocs_dump()