/linux-6.12.1/drivers/gpu/drm/xe/display/ |
D | xe_display.c |
    35 static bool has_display(struct xe_device *xe) in has_display() argument
    37 return HAS_DISPLAY(xe); in has_display()
    71 static void unset_display_features(struct xe_device *xe) in unset_display_features() argument
    73 xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); in unset_display_features()
    78 struct xe_device *xe = to_xe_device(dev); in display_destroy() local
    80 destroy_workqueue(xe->display.hotplug.dp_wq); in display_destroy()
    94 int xe_display_create(struct xe_device *xe) in xe_display_create() argument
    96 spin_lock_init(&xe->display.fb_tracking.lock); in xe_display_create()
    98 xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0); in xe_display_create()
   100 return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL); in xe_display_create()
   [all …]
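The xe_display_create() lines above pair an ordered-workqueue allocation with a drmm_add_action_or_reset() release action, so teardown is tied to the drm_device lifetime. A minimal sketch of that managed-release pattern, assuming an -ENOMEM check the truncated listing does not show (the example_* names are hypothetical; the drmm and workqueue calls are real kernel APIs):

    #include <drm/drm_managed.h>
    #include <linux/workqueue.h>

    /* Hypothetical release action; runs automatically at drm_device teardown. */
    static void example_display_destroy(struct drm_device *dev, void *arg)
    {
        struct xe_device *xe = to_xe_device(dev);

        destroy_workqueue(xe->display.hotplug.dp_wq);
    }

    static int example_display_create(struct xe_device *xe)
    {
        xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
        if (!xe->display.hotplug.dp_wq)
            return -ENOMEM;

        /* If registration fails, the action runs immediately ("or_reset"),
         * so the workqueue is never leaked. */
        return drmm_add_action_or_reset(&xe->drm, example_display_destroy, NULL);
    }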
|
D | xe_display.h |
    17 void xe_display_driver_remove(struct xe_device *xe);
    19 int xe_display_create(struct xe_device *xe);
    21 int xe_display_probe(struct xe_device *xe);
    23 int xe_display_init_nommio(struct xe_device *xe);
    24 int xe_display_init_noirq(struct xe_device *xe);
    25 int xe_display_init_noaccel(struct xe_device *xe);
    26 int xe_display_init(struct xe_device *xe);
    27 void xe_display_fini(struct xe_device *xe);
    29 void xe_display_register(struct xe_device *xe);
    30 void xe_display_unregister(struct xe_device *xe);
   [all …]
|
D | xe_hdcp_gsc.c |
    33 bool intel_hdcp_gsc_cs_required(struct xe_device *xe) in intel_hdcp_gsc_cs_required() argument
    35 return DISPLAY_VER(xe) >= 14; in intel_hdcp_gsc_cs_required()
    38 bool intel_hdcp_gsc_check_status(struct xe_device *xe) in intel_hdcp_gsc_check_status() argument
    40 struct xe_tile *tile = xe_device_get_root_tile(xe); in intel_hdcp_gsc_check_status()
    46 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
    51 xe_pm_runtime_get(xe); in intel_hdcp_gsc_check_status()
    53 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
    64 xe_pm_runtime_put(xe); in intel_hdcp_gsc_check_status()
    69 static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, in intel_hdcp_gsc_initialize_message() argument
    77 bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2, in intel_hdcp_gsc_initialize_message()
   [all …]
|
/linux-6.12.1/drivers/gpu/drm/xe/ |
D | xe_pm.c |
    89 bool xe_rpm_reclaim_safe(const struct xe_device *xe) in xe_rpm_reclaim_safe() argument
    91 return !xe->d3cold.capable && !xe->info.has_sriov; in xe_rpm_reclaim_safe()
    94 static void xe_rpm_lockmap_acquire(const struct xe_device *xe) in xe_rpm_lockmap_acquire() argument
    96 lock_map_acquire(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_acquire()
   101 static void xe_rpm_lockmap_release(const struct xe_device *xe) in xe_rpm_lockmap_release() argument
   103 lock_map_release(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_release()
   114 int xe_pm_suspend(struct xe_device *xe) in xe_pm_suspend() argument
   120 drm_dbg(&xe->drm, "Suspending device\n"); in xe_pm_suspend()
   121 trace_xe_pm_suspend(xe, __builtin_return_address(0)); in xe_pm_suspend()
   123 for_each_gt(gt, xe, id) in xe_pm_suspend()
   [all …]
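xe_rpm_lockmap_acquire()/xe_rpm_lockmap_release() above select one of two lockdep maps depending on whether runtime resume is reclaim-safe, so lockdep can model runtime PM as a virtual lock. A sketch of that idea under the assumption of two statically initialized maps (the example_* map names are hypothetical; lock_map_acquire()/lock_map_release() are the real lockdep primitives):

    #include <linux/lockdep.h>

    static struct lockdep_map example_d3cold_map = {
        .name = "example_rpm_d3cold_map",
    };

    static struct lockdep_map example_nod3cold_map = {
        .name = "example_rpm_nod3cold_map",
    };

    static void example_rpm_lockmap_acquire(const struct xe_device *xe)
    {
        /* Tell lockdep which "virtual lock" runtime resume corresponds to,
         * so memory allocations made under it are checked against
         * reclaim-driven deadlocks. */
        lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
                         &example_nod3cold_map :
                         &example_d3cold_map);
    }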
|
D | xe_device.c |
    64 struct xe_device *xe = to_xe_device(dev); in xe_file_open() local
    82 xef->xe = xe; in xe_file_open()
   145 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
   151 xe_pm_runtime_get(xe); in xe_file_close()
   170 xe_pm_runtime_put(xe); in xe_file_close()
   196 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl() local
   199 if (xe_device_wedged(xe)) in xe_drm_ioctl()
   202 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_ioctl()
   205 xe_pm_runtime_put(xe); in xe_drm_ioctl()
   214 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl() local
   [all …]
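The xe_drm_ioctl() fragments (source lines 196 to 205) outline a wrapper that rejects ioctls on a wedged device and brackets drm_ioctl() with a runtime-PM reference. A plausible reconstruction from the visible lines; the -ECANCELED return and the lines in between are assumptions, since the listing truncates them:

    static long example_drm_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
    {
        struct drm_file *file_priv = file->private_data;
        struct xe_device *xe = to_xe_device(file_priv->minor->dev);
        long ret;

        /* A wedged device accepts no new work. */
        if (xe_device_wedged(xe))
            return -ECANCELED;

        ret = xe_pm_runtime_get_ioctl(xe);
        if (ret >= 0)
            ret = drm_ioctl(file, cmd, arg);
        /* The reference is dropped on both paths, matching lines 202-205. */
        xe_pm_runtime_put(xe);

        return ret;
    }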
|
D | xe_irq.c |
    88 static u32 xelp_intr_disable(struct xe_device *xe) in xelp_intr_disable() argument
    90 struct xe_gt *mmio = xe_root_mmio_gt(xe); in xelp_intr_disable()
   104 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) in gu_misc_irq_ack() argument
   106 struct xe_gt *mmio = xe_root_mmio_gt(xe); in gu_misc_irq_ack()
   119 static inline void xelp_intr_enable(struct xe_device *xe, bool stall) in xelp_intr_enable() argument
   121 struct xe_gt *mmio = xe_root_mmio_gt(xe); in xelp_intr_enable()
   131 struct xe_device *xe = gt_to_xe(gt); in xe_irq_enable_hwe() local
   137 if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) in xe_irq_enable_hwe()
   140 if (xe_device_uc_enabled(xe)) { in xe_irq_enable_hwe()
   179 if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { in xe_irq_enable_hwe()
   [all …]
|
D | xe_sriov.c |
    36 static bool test_is_vf(struct xe_device *xe) in test_is_vf() argument
    38 u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); in test_is_vf()
    54 void xe_sriov_probe_early(struct xe_device *xe) in xe_sriov_probe_early() argument
    56 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_sriov_probe_early()
    58 bool has_sriov = xe->info.has_sriov; in xe_sriov_probe_early()
    61 if (test_is_vf(xe)) in xe_sriov_probe_early()
    63 else if (xe_sriov_pf_readiness(xe)) in xe_sriov_probe_early()
    73 drm_info(&xe->drm, "Support for SR-IOV is not available\n"); in xe_sriov_probe_early()
    77 xe_assert(xe, !xe->sriov.__mode); in xe_sriov_probe_early()
    78 xe->sriov.__mode = mode; in xe_sriov_probe_early()
   [all …]
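Pieced together, xe_sriov_probe_early() reads the VF capability register and settles the device's SR-IOV mode exactly once. A sketch under that reading; the control flow between the visible lines is an assumption, and only the calls shown in the excerpt are taken as given:

    static void example_sriov_probe_early(struct xe_device *xe)
    {
        enum xe_sriov_mode mode = XE_SRIOV_MODE_NONE;

        if (xe->info.has_sriov) {
            if (test_is_vf(xe))
                mode = XE_SRIOV_MODE_VF;
            else if (xe_sriov_pf_readiness(xe))
                mode = XE_SRIOV_MODE_PF;
        } else {
            drm_info(&xe->drm, "Support for SR-IOV is not available\n");
        }

        /* The mode is write-once: assert nothing set it earlier. */
        xe_assert(xe, !xe->sriov.__mode);
        xe->sriov.__mode = mode;
    }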
|
D | xe_pci_sriov.c |
    27 static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_provision_vfs() argument
    33 for_each_gt(gt, xe, id) { in pf_provision_vfs()
    43 static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_unprovision_vfs() argument
    49 for_each_gt(gt, xe, id) in pf_unprovision_vfs()
    54 static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_reset_vfs() argument
    60 for_each_gt(gt, xe, id) in pf_reset_vfs()
    65 static int pf_enable_vfs(struct xe_device *xe, int num_vfs) in pf_enable_vfs() argument
    67 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in pf_enable_vfs()
    68 int total_vfs = xe_sriov_pf_get_totalvfs(xe); in pf_enable_vfs()
    71 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_enable_vfs()
   [all …]
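pf_provision_vfs() and its companions apply the same per-GT operation across every GT before the VFs are exposed on the PCI bus. A sketch of the enable path implied by the excerpt, with a hypothetical per-GT helper standing in for the provisioning calls the listing truncates (example_config_gt() does not exist in the driver; pci_enable_sriov() and the asserts are real):

    static int example_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
    {
        struct xe_gt *gt;
        unsigned int id;
        int err;

        for_each_gt(gt, xe, id) {
            err = example_config_gt(gt, num_vfs); /* hypothetical per-GT step */
            if (err)
                return err;
        }

        return 0;
    }

    static int example_enable_vfs(struct xe_device *xe, int num_vfs)
    {
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        int err;

        xe_assert(xe, IS_SRIOV_PF(xe));
        xe_assert(xe, num_vfs <= xe_sriov_pf_get_totalvfs(xe));

        err = example_provision_vfs(xe, num_vfs);
        if (err)
            return err;

        /* Only expose the VFs on the bus once provisioning succeeded. */
        return pci_enable_sriov(pdev, num_vfs);
    }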
|
D | xe_pat.c |
   151 u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index) in xe_pat_index_get_coh_mode() argument
   153 WARN_ON(pat_index >= xe->pat.n_entries); in xe_pat_index_get_coh_mode()
   154 return xe->pat.table[pat_index].coh_mode; in xe_pat_index_get_coh_mode()
   179 struct xe_device *xe = gt_to_xe(gt); in xelp_dump() local
   188 for (i = 0; i < xe->pat.n_entries; i++) { in xelp_dump()
   198 xe_assert(xe, !err); in xelp_dump()
   208 struct xe_device *xe = gt_to_xe(gt); in xehp_dump() local
   217 for (i = 0; i < xe->pat.n_entries; i++) { in xehp_dump()
   229 xe_assert(xe, !err); in xehp_dump()
   239 struct xe_device *xe = gt_to_xe(gt); in xehpc_dump() local
   [all …]
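Note that xe_pat_index_get_coh_mode() above WARNs on an out-of-range index but still performs the table lookup. A defensive variant of the same lookup is sketched below; the early return is an added hardening choice, not the driver's behavior:

    static u16 example_pat_coh_mode(struct xe_device *xe, u16 pat_index)
    {
        /* drm_WARN_ON() evaluates to the condition, so a bad index can
         * short-circuit here instead of indexing past the table. */
        if (drm_WARN_ON(&xe->drm, pat_index >= xe->pat.n_entries))
            return 0; /* assumption: report "no coherency" when out of range */

        return xe->pat.table[pat_index].coh_mode;
    }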
|
D | xe_sriov_pf.c |
    15 static unsigned int wanted_max_vfs(struct xe_device *xe) in wanted_max_vfs() argument
    20 static int pf_reduce_totalvfs(struct xe_device *xe, int limit) in pf_reduce_totalvfs() argument
    22 struct device *dev = xe->drm.dev; in pf_reduce_totalvfs()
    28 xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n", in pf_reduce_totalvfs()
    33 static bool pf_continue_as_native(struct xe_device *xe, const char *why) in pf_continue_as_native() argument
    35 xe_sriov_dbg(xe, "%s, continuing as native\n", why); in pf_continue_as_native()
    36 pf_reduce_totalvfs(xe, 0); in pf_continue_as_native()
    49 bool xe_sriov_pf_readiness(struct xe_device *xe) in xe_sriov_pf_readiness() argument
    51 struct device *dev = xe->drm.dev; in xe_sriov_pf_readiness()
    54 int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs); in xe_sriov_pf_readiness()
   [all …]
|
D | xe_bo_evict.c |
    27 int xe_bo_evict_all(struct xe_device *xe) in xe_bo_evict_all() argument
    29 struct ttm_device *bdev = &xe->ttm; in xe_bo_evict_all()
    50 if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))) in xe_bo_evict_all()
    62 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
    64 bo = list_first_entry_or_null(&xe->pinned.external_vram, in xe_bo_evict_all()
    70 spin_unlock(&xe->pinned.lock); in xe_bo_evict_all()
    77 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
    79 &xe->pinned.external_vram); in xe_bo_evict_all()
    80 spin_unlock(&xe->pinned.lock); in xe_bo_evict_all()
    84 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
   [all …]
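The spin_lock()/spin_unlock() pairs in xe_bo_evict_all() are the classic drain-a-list-under-a-spinlock pattern: pop one entry, take a reference, drop the lock for the blocking work, retake it for the next entry. A generic sketch of that pattern; example_evict_one() is hypothetical, while the list head, lock, and refcount helpers come from the excerpt and the xe_bo API:

    #include <linux/list.h>

    static int example_drain_pinned(struct xe_device *xe)
    {
        LIST_HEAD(still_in_use);
        struct xe_bo *bo;
        int ret = 0;

        spin_lock(&xe->pinned.lock);
        for (;;) {
            bo = list_first_entry_or_null(&xe->pinned.external_vram,
                                          typeof(*bo), pinned_link);
            if (!bo)
                break;
            xe_bo_get(bo); /* keep the bo alive across the unlock */
            list_move_tail(&bo->pinned_link, &still_in_use);
            spin_unlock(&xe->pinned.lock);

            ret = example_evict_one(bo); /* hypothetical, may sleep */
            xe_bo_put(bo);

            spin_lock(&xe->pinned.lock);
            if (ret)
                break;
        }
        /* Put the processed entries back where they came from. */
        list_splice_tail(&still_in_use, &xe->pinned.external_vram);
        spin_unlock(&xe->pinned.lock);

        return ret;
    }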
|
D | xe_pci.c |
   450 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc) in find_subplatform() argument
   457 if (*id == xe->info.devid) in find_subplatform()
   468 static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) in read_gmdid() argument
   470 struct xe_gt *gt = xe_root_mmio_gt(xe); in read_gmdid()
   474 KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); in read_gmdid()
   476 if (IS_SRIOV_VF(xe)) { in read_gmdid()
   532 static void handle_pre_gmdid(struct xe_device *xe, in handle_pre_gmdid() argument
   536 xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel; in handle_pre_gmdid()
   539 xe->info.media_verx100 = media->ver * 100 + media->rel; in handle_pre_gmdid()
   547 static void handle_gmdid(struct xe_device *xe, in handle_gmdid() argument
   [all …]
|
D | xe_debugfs.c |
    39 struct xe_device *xe = node_to_xe(m->private); in info() local
    44 xe_pm_runtime_get(xe); in info()
    46 drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100); in info()
    47 drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100); in info()
    49 xe_step_name(xe->info.step.graphics), in info()
    50 xe_step_name(xe->info.step.media), in info()
    51 xe_step_name(xe->info.step.basedie)); in info()
    52 drm_printf(&p, "is_dgfx %s\n", str_yes_no(xe->info.is_dgfx)); in info()
    53 drm_printf(&p, "platform %d\n", xe->info.platform); in info()
    55 xe->info.subplatform > XE_SUBPLATFORM_NONE ? xe->info.subplatform : 0); in info()
   [all …]
|
D | xe_ttm_stolen_mgr.c |
    55 bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe) in xe_ttm_stolen_cpu_access_needs_ggtt() argument
    57 return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe); in xe_ttm_stolen_cpu_access_needs_ggtt()
    60 static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) in detect_bar2_dgfx() argument
    62 struct xe_tile *tile = xe_device_get_root_tile(xe); in detect_bar2_dgfx()
    63 struct xe_gt *mmio = xe_root_mmio_gt(xe); in detect_bar2_dgfx()
    64 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in detect_bar2_dgfx()
    69 tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start; in detect_bar2_dgfx()
    74 if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base)) in detect_bar2_dgfx()
    92 static u32 get_wopcm_size(struct xe_device *xe) in get_wopcm_size() argument
    97 val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED); in get_wopcm_size()
   [all …]
|
D | xe_device.h |
    32 static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe) in xe_device_const_cast() argument
    34 return (struct xe_device *)xe; in xe_device_const_cast()
    44 int xe_device_probe_early(struct xe_device *xe);
    45 int xe_device_probe(struct xe_device *xe);
    46 void xe_device_remove(struct xe_device *xe);
    47 void xe_device_shutdown(struct xe_device *xe);
    49 void xe_device_wmb(struct xe_device *xe);
    56 static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe) in xe_device_get_root_tile() argument
    58 return &xe->tiles[0]; in xe_device_get_root_tile()
    71 static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id) in xe_device_get_gt() argument
   [all …]
|
D | xe_sriov_printk.h |
    14 #define xe_sriov_printk_prefix(xe) \ argument
    15 ((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \
    16 (xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "")
    18 #define xe_sriov_printk(xe, _level, fmt, ...) \ argument
    19 drm_##_level(&(xe)->drm, "%s" fmt, xe_sriov_printk_prefix(xe), ##__VA_ARGS__)
    21 #define xe_sriov_err(xe, fmt, ...) \ argument
    22 xe_sriov_printk((xe), err, fmt, ##__VA_ARGS__)
    24 #define xe_sriov_err_ratelimited(xe, fmt, ...) \ argument
    25 xe_sriov_printk((xe), err_ratelimited, fmt, ##__VA_ARGS__)
    27 #define xe_sriov_warn(xe, fmt, ...) \ argument
   [all …]
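A usage sketch for the layered macros above: each thin wrapper forwards its level into xe_sriov_printk(), which prepends the PF/VF prefix before handing the format string to the matching drm_* helper. The call site below is hypothetical:

    /* On a PF this expands, via xe_sriov_printk(), to roughly:
     *   drm_err(&(xe)->drm, "%s" "provisioning VF%u failed (%pe)\n",
     *           "PF: ", vfid, ERR_PTR(err));
     */
    xe_sriov_err(xe, "provisioning VF%u failed (%pe)\n", vfid, ERR_PTR(err));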
|
D | xe_vram.c |
    27 _resize_bar(struct xe_device *xe, int resno, resource_size_t size) in _resize_bar() argument
    29 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in _resize_bar()
    38 drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support… in _resize_bar()
    43 drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); in _resize_bar()
    50 static void resize_vram_bar(struct xe_device *xe) in resize_vram_bar() argument
    53 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in resize_vram_bar()
    78 drm_info(&xe->drm, in resize_vram_bar()
    96 drm_info(&xe->drm, "Attempting to resize bar from %lluMiB -> %lluMiB\n", in resize_vram_bar()
   109 drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing. Consider enabling 'Resiza… in resize_vram_bar()
   116 _resize_bar(xe, LMEM_BAR, rebar_size); in resize_vram_bar()
   [all …]
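The two drm_info() strings in _resize_bar() bracket a PCI resizable-BAR attempt. A sketch of that helper consistent with the visible lines, assuming the standard pci_resize_resource() flow; the release step and the shortened failure message are assumptions, since the listing truncates both:

    static void example_resize_bar(struct xe_device *xe, int resno,
                                   resource_size_t size)
    {
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        int bar_size = pci_rebar_bytes_to_size(size);
        int ret;

        /* An already-claimed BAR must be released before resizing. */
        if (pci_resource_len(pdev, resno))
            pci_release_resource(pdev, resno);

        ret = pci_resize_resource(pdev, resno, bar_size);
        if (ret) {
            /* The full advisory text is truncated in the listing above. */
            drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe)\n",
                     resno, 1 << bar_size, ERR_PTR(ret));
            return;
        }

        drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
    }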
|
D | xe_sriov.h |
    18 void xe_sriov_probe_early(struct xe_device *xe);
    19 void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
    20 int xe_sriov_init(struct xe_device *xe);
    22 static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe) in xe_device_sriov_mode() argument
    24 xe_assert(xe, xe->sriov.__mode); in xe_device_sriov_mode()
    25 return xe->sriov.__mode; in xe_device_sriov_mode()
    28 static inline bool xe_device_is_sriov_pf(const struct xe_device *xe) in xe_device_is_sriov_pf() argument
    30 return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_PF; in xe_device_is_sriov_pf()
    33 static inline bool xe_device_is_sriov_vf(const struct xe_device *xe) in xe_device_is_sriov_vf() argument
    35 return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_VF; in xe_device_is_sriov_vf()
   [all …]
|
D | xe_pm.h |
    15 int xe_pm_suspend(struct xe_device *xe);
    16 int xe_pm_resume(struct xe_device *xe);
    18 int xe_pm_init_early(struct xe_device *xe);
    19 int xe_pm_init(struct xe_device *xe);
    20 void xe_pm_runtime_fini(struct xe_device *xe);
    21 bool xe_pm_runtime_suspended(struct xe_device *xe);
    22 int xe_pm_runtime_suspend(struct xe_device *xe);
    23 int xe_pm_runtime_resume(struct xe_device *xe);
    24 void xe_pm_runtime_get(struct xe_device *xe);
    25 int xe_pm_runtime_get_ioctl(struct xe_device *xe);
   [all …]
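The typical call pattern for the runtime-PM helpers declared above, as seen at the xe_hdcp_gsc.c and xe_device_sysfs.c call sites elsewhere in this listing (example_do_work() is hypothetical):

    /* Hold a runtime-PM reference across any hardware access. */
    xe_pm_runtime_get(xe);
    /* ... MMIO access, GuC messaging, etc. ... */
    xe_pm_runtime_put(xe);

    /* Ioctl paths use the failable variant; note that the xe_drm_ioctl()
     * excerpt still drops the reference on the failure path. */
    ret = xe_pm_runtime_get_ioctl(xe);
    if (ret >= 0)
        ret = example_do_work(xe); /* hypothetical */
    xe_pm_runtime_put(xe);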
|
D | xe_heci_gsc.c |
    91 void xe_heci_gsc_fini(struct xe_device *xe) in xe_heci_gsc_fini() argument
    93 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in xe_heci_gsc_fini()
    95 if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe)) in xe_heci_gsc_fini()
   111 static int heci_gsc_irq_setup(struct xe_device *xe) in heci_gsc_irq_setup() argument
   113 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in heci_gsc_irq_setup()
   118 drm_err(&xe->drm, "gsc irq error %d\n", heci_gsc->irq); in heci_gsc_irq_setup()
   124 drm_err(&xe->drm, "gsc irq init failed %d\n", ret); in heci_gsc_irq_setup()
   129 static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *def) in heci_gsc_add_device() argument
   131 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in heci_gsc_add_device()
   132 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in heci_gsc_add_device()
   [all …]
|
D | xe_rtp.c |
    27 static bool has_samedia(const struct xe_device *xe) in has_samedia() argument
    29 return xe->info.media_verx100 >= 1300; in has_samedia()
    32 static bool rule_matches(const struct xe_device *xe, in rule_matches() argument
    52 match = xe->info.platform == r->platform; in rule_matches()
    55 match = xe->info.platform == r->platform && in rule_matches()
    56 xe->info.subplatform == r->subplatform; in rule_matches()
    59 match = xe->info.graphics_verx100 == r->ver_start && in rule_matches()
    60 (!has_samedia(xe) || !xe_gt_is_media_type(gt)); in rule_matches()
    63 match = xe->info.graphics_verx100 >= r->ver_start && in rule_matches()
    64 xe->info.graphics_verx100 <= r->ver_end && in rule_matches()
   [all …]
|
D | xe_device_sysfs.c |
    32 struct xe_device *xe = pdev_to_xe_device(pdev); in vram_d3cold_threshold_show() local
    35 if (!xe) in vram_d3cold_threshold_show()
    38 xe_pm_runtime_get(xe); in vram_d3cold_threshold_show()
    39 ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold); in vram_d3cold_threshold_show()
    40 xe_pm_runtime_put(xe); in vram_d3cold_threshold_show()
    50 struct xe_device *xe = pdev_to_xe_device(pdev); in vram_d3cold_threshold_store() local
    54 if (!xe) in vram_d3cold_threshold_store()
    61 drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold); in vram_d3cold_threshold_store()
    63 xe_pm_runtime_get(xe); in vram_d3cold_threshold_store()
    64 ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold); in vram_d3cold_threshold_store()
   [all …]
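The show/store pair above is the standard sysfs attribute shape with runtime-PM bracketing around the device access. A sketch of both halves built from the visible calls; the kstrtou32() parsing step and the attribute name are assumptions, since the listing truncates that part:

    static ssize_t example_threshold_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
    {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct xe_device *xe = pdev_to_xe_device(pdev);
        ssize_t ret;

        xe_pm_runtime_get(xe);
        ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
        xe_pm_runtime_put(xe);

        return ret;
    }

    static ssize_t example_threshold_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
    {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct xe_device *xe = pdev_to_xe_device(pdev);
        u32 val;
        int ret;

        ret = kstrtou32(buf, 0, &val); /* assumed parsing step */
        if (ret)
            return ret;

        xe_pm_runtime_get(xe);
        ret = xe_pm_set_vram_threshold(xe, val);
        xe_pm_runtime_put(xe);

        return ret ?: count;
    }
    static DEVICE_ATTR_RW(example_threshold);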
|
/linux-6.12.1/drivers/gpu/drm/xe/compat-i915-headers/ |
D | i915_drv.h |
    24 #define IS_PLATFORM(xe, x) ((xe)->info.platform == x) argument
    76 #define IS_MOBILE(xe) (xe && 0) argument
    78 #define IS_LP(xe) ((xe) && 0) argument
    79 #define IS_GEN9_LP(xe) ((xe) && 0) argument
    80 #define IS_GEN9_BC(xe) ((xe) && 0) argument
    82 #define IS_TIGERLAKE_UY(xe) (xe && 0) argument
    83 #define IS_COMETLAKE_ULX(xe) (xe && 0) argument
    84 #define IS_COFFEELAKE_ULX(xe) (xe && 0) argument
    85 #define IS_KABYLAKE_ULX(xe) (xe && 0) argument
    86 #define IS_SKYLAKE_ULX(xe) (xe && 0) argument
   [all …]
|
/linux-6.12.1/drivers/gpu/drm/xe/tests/ |
D | xe_migrate.c |
    15 static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence, in sanity_fence_failed() argument
    37 static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe, in run_sanity_job() argument
    41 u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm); in run_sanity_job()
    57 if (sanity_fence_failed(xe, fence, str, test)) in run_sanity_job()
    75 struct xe_device *xe = tile_to_xe(m->tile); in test_copy() local
    82 struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL, in test_copy()
   107 xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); in test_copy()
   110 if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" : in test_copy()
   112 retval = xe_map_rd(xe, &remote->vmap, 0, u64); in test_copy()
   115 retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64); in test_copy()
   [all …]
|
D | xe_kunit_helpers.c |
    36 struct xe_device *xe; in xe_kunit_helper_alloc_xe_device() local
    38 xe = drm_kunit_helper_alloc_drm_device(test, dev, in xe_kunit_helper_alloc_xe_device()
    41 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_alloc_xe_device()
    42 return xe; in xe_kunit_helper_alloc_xe_device()
    73 struct xe_device *xe; in xe_kunit_helper_xe_device_test_init() local
    80 xe = xe_kunit_helper_alloc_xe_device(test, dev); in xe_kunit_helper_xe_device_test_init()
    81 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_xe_device_test_init()
    83 err = xe_pci_fake_device_init(xe); in xe_kunit_helper_xe_device_test_init()
    89 test->priv = xe; in xe_kunit_helper_xe_device_test_init()
   117 struct xe_device *xe = xe_device_const_cast(test->param_value); in xe_kunit_helper_xe_device_live_test_init() local
   [all …]
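The helpers above allocate a fake xe_device whose lifetime is bound to the test. A sketch of a KUnit init hook assembled from the calls visible in the excerpt; drm_kunit_helper_alloc_device() is the standard DRM KUnit device allocator, and its use here is an assumption since the listing does not show where dev comes from:

    static int example_xe_test_init(struct kunit *test)
    {
        struct xe_device *xe;
        struct device *dev;

        /* Fake parent device, cleaned up automatically with the test. */
        dev = drm_kunit_helper_alloc_device(test);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

        xe = xe_kunit_helper_alloc_xe_device(test, dev);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);

        /* Populate xe->info as if a PCI probe had run. */
        KUNIT_ASSERT_EQ(test, xe_pci_fake_device_init(xe), 0);

        test->priv = xe;
        return 0;
    }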
|