/linux-6.12.1/drivers/gpu/drm/i915/gvt/
vgpu.c:

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
    struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

    vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
    vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
    vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
    vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
    [all …]

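The vgt_caps field above is built up with |=, so each capability is an independent bit and a guest-visible query reduces to a mask test. A minimal sketch of such a test, using only the macros shown in the excerpt (the helper name is hypothetical, not a symbol from vgpu.c):

static bool vgpu_advertises_full_ppgtt(struct intel_vgpu *vgpu)
{
    /* PVINFO capability bits are plain flags in the vgt_caps vreg. */
    return vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) & VGT_CAPS_FULL_PPGTT;
}
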
display.c:

static int get_edp_pipe(struct intel_vgpu *vgpu)
{
    u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
    …
}

static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

    if (!(vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
        return 0;

    if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
        return 0;
    …
}

int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
    …
    if (vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) & TRANSCONF_ENABLE)
        return 1;

    if (edp_pipe_is_enabled(vgpu) &&
    [all …]

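pipe_is_enabled() answers for one pipe at a time; callers that need "any enabled pipe" loop over the pipe range, as fb_decoder.c's get_active_pipe() (excerpted further below) does. A hedged sketch of that pattern, assuming the standard I915_MAX_PIPES bound (the helper name is hypothetical):

static int first_enabled_pipe(struct intel_vgpu *vgpu)
{
    int i;

    /* Probe virtual pipes in order; pipe_is_enabled() returns 1 when on. */
    for (i = 0; i < I915_MAX_PIPES; i++)
        if (pipe_is_enabled(vgpu, i))
            return i;

    return -1; /* no enabled pipe */
}
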
aperture_gm.c:

static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
    struct intel_gvt *gvt = vgpu->gvt;
    …
    if (high_gm) {
        node = &vgpu->gm.high_gm_node;
        size = vgpu_hidden_sz(vgpu);
        …
    } else {
        node = &vgpu->gm.low_gm_node;
        size = vgpu_aperture_sz(vgpu);
        …
    }
    …
}

static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
    struct intel_gvt *gvt = vgpu->gvt;
    …
    ret = alloc_gm(vgpu, false);
    if (ret)
        return ret;

    ret = alloc_gm(vgpu, true);
    [all …]

cfg_space.c:

/**
 * …
 * @vgpu: target vgpu
 * …
 */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
                                   u8 *src, unsigned int bytes)
{
    u8 *cfg_base = vgpu_cfg_space(vgpu);
    …
    if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
        pwr = (pci_power_t __force)(*(u16 *)(&vgpu_cfg_space(vgpu)[off])
                & PCI_PM_CTRL_STATE_MASK);
        if (pwr == PCI_D3hot)
            vgpu->d3_entered = true;
        gvt_dbg_core("vgpu-%d power status changed to %d\n",
                     vgpu->id, pwr);
    }
}

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vgpu
 * [all …]

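The PMCSR write path above derives the requested power state by masking the low bits of the 16-bit power-management control/status word. A hedged restatement of that decode as a standalone helper (the name is hypothetical; pci_power_t and PCI_PM_CTRL_STATE_MASK are the standard kernel PCI definitions):

static pci_power_t vgpu_pmcsr_power_state(struct intel_vgpu *vgpu)
{
    unsigned int off = vgpu->cfg_space.pmcsr_off;

    /* The D-state (D0..D3hot) lives in the low bits of PMCSR. */
    return (pci_power_t __force)(*(u16 *)&vgpu_cfg_space(vgpu)[off] &
                                 PCI_PM_CTRL_STATE_MASK);
}
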
mmio.c:

/**
 * …
 * @vgpu: a vGPU
 * …
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
    u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
    …
}

static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
                                     void *p_data, unsigned int bytes, bool read)
{
    …
    if (!vgpu || !p_data)
        return;

    gvt = vgpu->gvt;
    mutex_lock(&vgpu->vgpu_lock);
    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
    …
        intel_vgpu_default_mmio_read(vgpu, offset, p_data,
                                     bytes);
    …
        intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                                      bytes);
    [all …]

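The translation above is plain displacement arithmetic against the guest physical address of BAR0, where the GTTMMIO window lives. A minimal sketch of the idea under that assumption (not the file's exact body, which also validates the address):

static inline u64 gpa_to_mmio_offset(u64 gpa, u64 gttmmio_gpa)
{
    /* An MMIO offset is the GPA's displacement from the BAR0 base. */
    return gpa - gttmmio_gpa;
}
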
gvt.h:

/* GM resources owned by a vGPU */
…
/* Fences owned by a vGPU */
…
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
…
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
…
    int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
    void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
    void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
…
/*
 * …
 * scheduler structure. So below 2 vgpu data are protected
 * …
 */
…
/*
 * …
 * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with
 * a weight of 4 on a contended host, different vGPU type has different
 [all …]

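vgpu_cfg_space() hands back the virtual PCI configuration space as a raw byte array, so fixed-offset config fields can be read directly out of it. A hedged sketch (the helper name is hypothetical; PCI_VENDOR_ID is the standard offset from <linux/pci_regs.h>):

static inline u16 vgpu_cfg_vendor_id(struct intel_vgpu *vgpu)
{
    /* Offset 0x0 of PCI config space holds the 16-bit vendor ID. */
    return *(u16 *)(vgpu_cfg_space(vgpu) + PCI_VENDOR_ID);
}
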
kvmgt.c:

    size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
                 size_t count, loff_t *ppos, bool iswrite);
    void (*release)(struct intel_vgpu *vgpu,
                    struct vfio_region *region);
…
    struct intel_vgpu *vgpu;
…

static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                                 unsigned long size)
{
    vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
                     DIV_ROUND_UP(size, PAGE_SIZE));
}

static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                              unsigned long size, struct page **page)
{
    …
        ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
                             IOMMU_READ | IOMMU_WRITE, &cur_page);
    …
    gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
    …
}

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
                            dma_addr_t *dma_addr, unsigned long size)
{
    struct device *dev = vgpu->gvt->gt->i915->drm.dev;
    [all …]

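Both helpers address guest memory through VFIO by IOVA, so every call site converts a guest frame number with the same shift. A trivial hedged restatement of that arithmetic (the helper is illustrative, not a symbol from kvmgt.c):

static inline dma_addr_t gvt_gfn_to_iova(unsigned long gfn)
{
    /* vfio_pin_pages()/vfio_unpin_pages() take IOVAs, not frame numbers. */
    return (dma_addr_t)gfn << PAGE_SHIFT;
}
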
sched_policy.c:

static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
    …
    for_each_engine(engine, vgpu->gvt->gt, i) {
        if (!list_empty(workload_q_head(vgpu, engine)))
            return true;
    }

    return false;
}

/* We give 2 seconds higher prio for vGPU during start */
…
    struct intel_vgpu *vgpu;
…

static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
    …
    if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
        return;

    vgpu_data = vgpu->sched_data;
    …
}
…
/* try_to_schedule_next_vgpu() */
    /* stop dispatching workload for current vgpu */
    …
    /* switch current vgpu */
    [all …]

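The idle-vGPU check in vgpu_update_timeslice() and the per-engine queue scan compose naturally when deciding whether a candidate deserves a timeslice. A hedged sketch of that composition, assuming it sits in the same file so it can reach the static helper (the name is hypothetical):

static bool vgpu_worth_dispatching(struct intel_vgpu *vgpu)
{
    /* The idle vGPU is a bookkeeping placeholder and never has work. */
    if (vgpu == vgpu->gvt->idle_vgpu)
        return false;

    return vgpu_has_pending_workload(vgpu);
}
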
edid.c:

static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
{
    struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
    …
    if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
        struct intel_vgpu_edid_data *edid_data =
            intel_vgpu_port(vgpu, edid->port)->edid;
    …
}

static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
    vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
    if (!vgpu->display.i2c_edid.edid_available)
        vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
    vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}

static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
                             unsigned int offset, void *p_data, unsigned int bytes)
[all …]

gtt.c:

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
    if (size == 0)
        return vgpu_gmadr_is_valid(vgpu, addr);

    if (vgpu_gmadr_is_aperture(vgpu, addr) &&
        vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
        return true;
    else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
             vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
        return true;
    …
}

int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
    struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

    if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
                 "invalid guest gmadr %llx\n", g_addr))
        return -EACCES;

    if (vgpu_gmadr_is_aperture(vgpu, g_addr))
    [all …]

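Guest graphics addresses are expected to be range-checked before translation; validate-then-translate is the intended pairing of these two entry points. A hedged sketch (the wrapper name is hypothetical):

static int g2h_checked(struct intel_vgpu *vgpu, u64 g_addr, u32 size,
                       u64 *h_addr)
{
    /* Reject ranges that fall outside both the aperture and hidden GM. */
    if (!intel_gvt_ggtt_validate_range(vgpu, g_addr, size))
        return -EINVAL;

    return intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, h_addr);
}
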
handlers.c:

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
                      void *p_data, unsigned int bytes)
{
    memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
                       void *p_data, unsigned int bytes)
{
    memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
…

void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
    …
    pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
    vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
                                      unsigned int fence_num, void *p_data, unsigned int bytes)
{
    unsigned int max_fence = vgpu_fence_sz(vgpu);
    …
    /* …
     * and we will let vgpu enter failsafe mode.
     * … */
    [all …]

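read_vreg()/write_vreg() are the memcpy primitives most default handlers are built from. A hedged sketch of a pass-through write handler in that style, assuming it lives in the same file (the name is hypothetical; the signature follows the intel_vgpu_default_mmio_* declarations in mmio.h):

static int passthru_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                               void *p_data, unsigned int bytes)
{
    /* Latch the guest's value into the virtual register file unchanged. */
    write_vreg(vgpu, offset, p_data, bytes);
    return 0;
}
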
debugfs.c:

struct mmio_diff_param {
    struct intel_vgpu *vgpu;
    …
};

/* mmio_diff_handler() */
    vreg = vgpu_vreg(param->vgpu, offset);
…

/* vgpu_mmio_diff_show() */
    struct intel_vgpu *vgpu = s->private;
    struct intel_gvt *gvt = vgpu->gvt;
    struct mmio_diff_param param = {
        .vgpu = vgpu,
    …
    seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
…

/* vgpu_scan_nonprivbb_get() */
    struct intel_vgpu *vgpu = (struct intel_vgpu *)data;

    *val = vgpu->scan_nonprivbb;
…

/*
 * set/unset bit engine_id of vgpu->scan_nonprivbb to turn on/off scanning
 * …
 * if vgpu->scan_nonprivbb=3, then it will scan non-privileged batch buffer
 [all …]

page_track.c:

/**
 * …
 * @vgpu: a vGPU
 * …
 */
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
        struct intel_vgpu *vgpu, unsigned long gfn)
{
    return radix_tree_lookup(&vgpu->page_track_tree, gfn);
}

/**
 * …
 * @vgpu: a vGPU
 * …
 */
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
                                   gvt_page_track_handler_t handler, void *priv_data)
{
    …
    track = intel_vgpu_find_page_track(vgpu, gfn);
    …
    ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
    …
}

/**
 * …
 * @vgpu: a vGPU
 * …
 */
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
                                      unsigned long gfn)
{
    …
    track = radix_tree_delete(&vgpu->page_track_tree, gfn);
    [all …]

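The three entry points form a simple per-GFN lifecycle over the vGPU's radix tree: register installs a handler, find looks it up on a fault, unregister tears it down. A hedged sketch of that flow (the wrapper and its arguments are illustrative; gvt_page_track_handler_t is assumed to be the handler type these declarations use):

static int track_one_gfn(struct intel_vgpu *vgpu, unsigned long gfn,
                         gvt_page_track_handler_t handler, void *priv)
{
    int ret;

    /* From here on, guest writes to gfn are routed to handler. */
    ret = intel_vgpu_register_page_track(vgpu, gfn, handler, priv);
    if (ret)
        return ret;

    /* … consume write notifications … */

    intel_vgpu_unregister_page_track(vgpu, gfn);
    return 0;
}
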
scheduler.c:

/* sr_oa_regs() */
    struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
…

/* populate_shadow_context() */
    struct intel_vgpu *vgpu = workload->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    …
    struct intel_vgpu_submission *s = &vgpu->submission;
    …
    intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
    …
    intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
    …
    intel_gvt_read_gpa(vgpu,
    …
    intel_gvt_read_gpa(vgpu,
    …
    context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
    …
    intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
    [all …]

interrupt.c:

static void update_upstream_irq(struct intel_vgpu *vgpu,
                                struct intel_gvt_irq_info *info);
…

/**
 * …
 * @vgpu: a vGPU
 * …
 */
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
                               unsigned int reg, void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    …
    trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
                   (vgpu_vreg(vgpu, reg) ^ imr));

    vgpu_vreg(vgpu, reg) = imr;

    ops->check_pending_irq(vgpu);
    …
}

/**
 * …
 * @vgpu: a vGPU
 * …
 */
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
[all …]

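Note what trace_write_ir() logs: the new mask plus the XOR of the old and new vreg values, i.e. exactly which interrupt-mask bits flipped on this write. That bit arithmetic restated as a hedged standalone helper (illustrative, not a symbol from this file):

static inline u32 imr_changed_bits(u32 old_imr, u32 new_imr)
{
    /* A set bit means that interrupt's masked state flipped. */
    return old_imr ^ new_imr;
}
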
dmabuf.c:

/* vgpu_gem_get_pages() */
    struct intel_vgpu *vgpu;
    …
    vgpu = fb_info->obj->vgpu;
    if (drm_WARN_ON(&dev_priv->drm, !vgpu))
    …
    if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
    …
    intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
…

/* vgpu_gem_put_pages() */
    struct intel_vgpu *vgpu = obj->vgpu;
    …
    intel_gvt_dma_unmap_guest_page(vgpu,
…

/* dmabuf_gem_object_free() */
    struct intel_vgpu *vgpu = obj->vgpu;
    …
    if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
        !list_empty(&vgpu->dmabuf_obj_list_head)) {
    [all …]

execlist.c:

/* emulate_execlist_status() */
    struct intel_vgpu *vgpu = execlist->vgpu;
    …
    status.ldw = vgpu_vreg(vgpu, status_reg);
    status.udw = vgpu_vreg(vgpu, status_reg + 4);
    …
    vgpu_vreg(vgpu, status_reg) = status.ldw;
    vgpu_vreg(vgpu, status_reg + 4) = status.udw;

    gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
               vgpu->id, status_reg, status.ldw, status.udw);
…

/* emulate_csb_update() */
    struct intel_vgpu *vgpu = execlist->vgpu;
    …
    ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
    …
    vgpu_vreg(vgpu, offset) = status->ldw;
    [all …]

fb_decoder.c:

static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
                                 u32 tiled, int stride_mask, int bpp)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

    u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(dev_priv, pipe)) & stride_mask;
    …
}

static int get_active_pipe(struct intel_vgpu *vgpu)
{
    int i;

    for (i = 0; i < I915_MAX_PIPES; i++)
        if (pipe_is_enabled(vgpu, i))
            break;

    return i;
}

/**
 * …
 * @vgpu: input vgpu
 * …
 */
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                                    struct intel_vgpu_primary_plane_format *plane)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
    …
    pipe = get_active_pipe(vgpu);
    …
    val = vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe));
    [all …]

opregion.c:

/**
 * …
 * @vgpu: a vGPU
 * …
 */
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
{
    …
    gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
    vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
            __GFP_ZERO,
            get_order(INTEL_GVT_OPREGION_SIZE));
    if (!vgpu_opregion(vgpu)->va) {
        gvt_err("fail to get memory for vgpu virt opregion\n");
        return -ENOMEM;
    }
    …
    buf = (u8 *)vgpu_opregion(vgpu)->va;
    …
}

/**
 * …
 * @vgpu: a vGPU
 * …
 */
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
    …
    for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
        vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
    [all …]

display.h:

#define intel_vgpu_port(vgpu, port) \
    (&(vgpu->display.ports[port]))

#define intel_vgpu_has_monitor_on_port(vgpu, port) \
    (intel_vgpu_port(vgpu, port)->edid && \
     intel_vgpu_port(vgpu, port)->edid->data_valid)

#define intel_vgpu_port_is_dp(vgpu, port) \
    ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
     (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
     (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
     (intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
[all …]

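These macros nest: the monitor and DP checks both go through the port record that intel_vgpu_port() returns. A hedged usage sketch combining them (the helper name is hypothetical):

static bool port_has_dp_monitor(struct intel_vgpu *vgpu, int port)
{
    /* True only for DP-typed ports exposing a valid virtual EDID. */
    return intel_vgpu_has_monitor_on_port(vgpu, port) &&
           intel_vgpu_port_is_dp(vgpu, port);
}
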
gtt.h:

    …
        struct intel_vgpu *vgpu);
    …
        struct intel_vgpu *vgpu);
    …
    struct intel_vgpu *vgpu;
    …

struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
        …);
…
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
…
struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
        …);
…
/* Represent a vgpu shadow page table. */
[all …]

mmio.h:

int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);

int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);

int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
                                 void *p_data, unsigned int bytes);
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
                                  void *p_data, unsigned int bytes);

int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                                  void *p_data, unsigned int bytes);
…
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
                           void *pdata, unsigned int bytes, bool is_read);

int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                               void *p_data, unsigned int bytes);

sched_policy.h:

struct intel_gvt_sched_policy_ops {
    …
    int (*init_vgpu)(struct intel_vgpu *vgpu);
    void (*clean_vgpu)(struct intel_vgpu *vgpu);
    void (*start_schedule)(struct intel_vgpu *vgpu);
    void (*stop_schedule)(struct intel_vgpu *vgpu);
};
…
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);

scheduler.h:

struct intel_vgpu_workload {
    struct intel_vgpu *vgpu;
    …
};
…
#define workload_q_head(vgpu, e) \
    (&(vgpu)->submission.workload_q_head[(e)->id])
…
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
                                 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
                                     intel_engine_mask_t engine_mask,
                                     unsigned int interface);
…
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
                           …);
…
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
                                intel_engine_mask_t engine_mask);

/linux-6.12.1/drivers/gpu/drm/i915/
intel_gvt.c:

/*
 * … virtual machine is presented a virtual GPU (vGPU), which has equivalent
 * …
 */

/* free_initial_hw_state() */
    struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;

    vfree(vgpu->initial_mmio);
    vgpu->initial_mmio = NULL;

    kfree(vgpu->initial_cfg_space);
    vgpu->initial_cfg_space = NULL;

/* save_initial_hw_state() */
    struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
    …
    vgpu->initial_cfg_space = mem;
    …
    vgpu->initial_mmio = mem;
    …
    iter.data = vgpu->initial_mmio;
    [all …]