Lines Matching full:vgpu in drivers/gpu/drm/i915/gvt/scheduler.c

87 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;  in sr_oa_regs()
129 struct intel_vgpu *vgpu = workload->vgpu; in populate_shadow_context() local
130 struct intel_gvt *gvt = vgpu->gvt; in populate_shadow_context()
138 struct intel_vgpu_submission *s = &vgpu->submission; in populate_shadow_context()
153 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
156 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
170 intel_gvt_read_gpa(vgpu, in populate_shadow_context()
181 intel_gvt_read_gpa(vgpu, in populate_shadow_context()
226 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, in populate_shadow_context()
248 intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size); in populate_shadow_context()
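
The populate_shadow_context() hits above all follow one pattern: each 32-bit ring-context register is fetched from the guest's ring-context page at a fixed offset via intel_gvt_read_gpa(), and whole context pages are then located by translating GGTT addresses with intel_vgpu_gma_to_gpa(). A minimal sketch of the register-copy helper, reassembled from the matched lines (RING_CTX_OFF and the execlist_ring_context field names are assumed from the surrounding gvt headers; the fields copied here are illustrative):

    /* Fetch one 32-bit register from the guest's ring-context page
     * into the host-side shadow copy. */
    #define COPY_REG(name) \
            intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
                    + RING_CTX_OFF(name.val), \
                    &shadow_ring_context->name.val, 4)

            COPY_REG(ctx_ctrl);
            COPY_REG(ctx_timestamp);
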
267 static void save_ring_hw_state(struct intel_vgpu *vgpu, in save_ring_hw_state() argument
274 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = in save_ring_hw_state()
278 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = in save_ring_hw_state()
282 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = in save_ring_hw_state()
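
save_ring_hw_state() snapshots a few live hardware ring registers into the vGPU's virtual register file so the guest can inspect them after a hang. A sketch of the whole function, assuming intel_uncore_read() as the raw MMIO read and INSTDONE/ACTHD as the saved registers (both assumptions from memory of the gvt source, not from the matched lines):

    static void save_ring_hw_state(struct intel_vgpu *vgpu,
                                   struct intel_engine_cs *engine)
    {
            struct intel_uncore *uncore = engine->uncore;
            i915_reg_t reg;

            /* Mirror hang-diagnosis registers into the vGPU's vregs. */
            reg = RING_INSTDONE(engine->mmio_base);
            vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
                    intel_uncore_read(uncore, reg);

            reg = RING_ACTHD(engine->mmio_base);
            vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
                    intel_uncore_read(uncore, reg);

            reg = RING_ACTHD_UDW(engine->mmio_base);
            vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
                    intel_uncore_read(uncore, reg);
    }
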
301 /* Switch ring from vGPU to host. */ in shadow_context_status_change()
318 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
319 /* Switch ring from host to vGPU or vGPU to vGPU. */ in shadow_context_status_change()
321 workload->vgpu, rq->engine); in shadow_context_status_change()
322 scheduler->engine_owner[ring_id] = workload->vgpu; in shadow_context_status_change()
324 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", in shadow_context_status_change()
325 ring_id, workload->vgpu->id); in shadow_context_status_change()
330 save_ring_hw_state(workload->vgpu, rq->engine); in shadow_context_status_change()
334 save_ring_hw_state(workload->vgpu, rq->engine); in shadow_context_status_change()
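
The shadow_context_status_change() hits show the per-ring MMIO ownership handoff: when a request enters the hardware and belongs to a different vGPU than the ring's current owner, host/vGPU MMIO state is switched and the new owner recorded. Reassembled from the fragments above (a NULL owner denotes the host):

    if (workload->vgpu != scheduler->engine_owner[ring_id]) {
            /* Switch ring from host to vGPU or vGPU to vGPU. */
            intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                  workload->vgpu, rq->engine);
            scheduler->engine_owner[ring_id] = workload->vgpu;
    } else {
            gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                          ring_id, workload->vgpu->id);
    }
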
363 struct intel_vgpu *vgpu = workload->vgpu; in copy_workload_to_ring_buffer() local
370 intel_vgpu_restore_inhibit_context(vgpu, req); in copy_workload_to_ring_buffer()
461 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_workload_req_alloc() local
462 struct intel_vgpu_submission *s = &vgpu->submission; in intel_gvt_workload_req_alloc()
488 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_scan_and_shadow_workload() local
489 struct intel_vgpu_submission *s = &vgpu->submission; in intel_gvt_scan_and_shadow_workload()
492 lockdep_assert_held(&vgpu->vgpu_lock); in intel_gvt_scan_and_shadow_workload()
524 struct intel_gvt *gvt = workload->vgpu->gvt; in prepare_shadow_batch_buffer()
652 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) = in update_vreg_in_ctx()
686 struct intel_vgpu *vgpu = workload->vgpu; in intel_vgpu_shadow_mm_pin() local
692 	gvt_vgpu_err("failed to pin vgpu mm\n"); in intel_vgpu_shadow_mm_pin()
739 struct intel_vgpu *vgpu = workload->vgpu; in prepare_workload() local
740 struct intel_vgpu_submission *s = &vgpu->submission; in prepare_workload()
753 ret = intel_vgpu_sync_oos_pages(workload->vgpu); in prepare_workload()
755 	gvt_vgpu_err("failed to sync vgpu oos pages\n"); in prepare_workload()
759 ret = intel_vgpu_flush_post_shadow(workload->vgpu); in prepare_workload()
801 struct intel_vgpu *vgpu = workload->vgpu; in dispatch_workload() local
808 mutex_lock(&vgpu->vgpu_lock); in dispatch_workload()
843 mutex_unlock(&vgpu->vgpu_lock); in dispatch_workload()
856 * no current vgpu / will be scheduled out / no workload in pick_next_workload()
860 gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name); in pick_next_workload()
889 * schedule out a vgpu. in pick_next_workload()
900 atomic_inc(&workload->vgpu->submission.running_workload_num); in pick_next_workload()
906 static void update_guest_pdps(struct intel_vgpu *vgpu, in update_guest_pdps() argument
915 intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); in update_guest_pdps()
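
update_guest_pdps() writes the page-directory pointers back into the guest's ring context. Reconstructed around the matched line; the base offset via RING_CTX_OFF(pdps[0].val) is an assumption. The 8-byte stride with 4-byte writes reflects the context layout, where each dword sits in the value half of a (reg, value) pair, and the dwords are stored in reverse order relative to pdp[]:

    static void update_guest_pdps(struct intel_vgpu *vgpu,
                                  u64 ring_context_gpa, u32 pdp[8])
    {
            u64 gpa;
            int i;

            gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

            for (i = 0; i < 8; i++)
                    intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
    }
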
939 struct intel_vgpu *vgpu = workload->vgpu; in update_guest_context() local
971 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail; in update_guest_context()
972 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head; in update_guest_context()
988 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, in update_guest_context()
1010 intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size); in update_guest_context()
1016 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + in update_guest_context()
1026 update_guest_pdps(vgpu, workload->ring_context_gpa, in update_guest_context()
1031 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \ in update_guest_context()
1039 intel_gvt_write_gpa(vgpu, in update_guest_context()
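
update_guest_context() is populate_shadow_context() in reverse: RING_TAIL/RING_HEAD are mirrored into the vregs, then each shadow context page is translated through the vGPU's GGTT and copied back to guest memory. A simplified page-at-a-time sketch; the real code batches contiguous GPA ranges into fewer intel_gvt_write_gpa() calls, and context_page_num/src are assumed locals:

    for (i = 0; i < context_page_num; i++) {
            context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                            (u32)((workload->ctx_desc.lrca + i) <<
                                  I915_GTT_PAGE_SHIFT));
            if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                    gvt_vgpu_err("invalid guest context descriptor\n");
                    return;
            }
            /* src points at the matching shadow context page. */
            intel_gvt_write_gpa(vgpu, context_gpa, src, I915_GTT_PAGE_SIZE);
            src += I915_GTT_PAGE_SIZE;
    }
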
1047 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, in intel_vgpu_clean_workloads() argument
1050 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_clean_workloads()
1056 for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { in intel_vgpu_clean_workloads()
1071 struct intel_vgpu *vgpu = workload->vgpu; in complete_current_workload() local
1072 struct intel_vgpu_submission *s = &vgpu->submission; in complete_current_workload()
1076 mutex_lock(&vgpu->vgpu_lock); in complete_current_workload()
1100 !(vgpu->resetting_eng & BIT(ring_id))) { in complete_current_workload()
1105 intel_vgpu_trigger_virtual_event(vgpu, event); in complete_current_workload()
1118 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { in complete_current_workload()
1122 	 * So this error is actually a vGPU hang to the guest. in complete_current_workload()
1123 	 * Accordingly, we should emulate a vGPU hang. If in complete_current_workload()
1132 intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); in complete_current_workload()
1147 mutex_unlock(&vgpu->vgpu_lock); in complete_current_workload()
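
complete_current_workload() treats a failed workload, or one caught by an engine reset, as a guest-visible hang, per the comment above: the virtual completion event is only injected on success, and on failure everything still queued on that ring is flushed so the guest's reset starts from a clean queue. The cleanup branch, condensed from the matched lines:

    if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
            /* Emulate a vGPU hang: drop the remaining queued
             * workloads on this ring. */
            intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
    }
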
1157 struct intel_vgpu *vgpu = NULL; in workload_thread() local
1179 gvt_dbg_sched("ring %s next workload %p vgpu %d\n", in workload_thread()
1181 workload->vgpu->id); in workload_thread()
1192 * Update the vReg of the vGPU which submitted this in workload_thread()
1193 * workload. The vGPU may use these registers for checking in workload_thread()
1202 vgpu = workload->vgpu; in workload_thread()
1223 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); in workload_thread()
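
workload_thread() is the per-engine kthread that glues these stages together: pick a workload, dispatch it, wait on its i915 request, complete it, and push the vGPU into failsafe mode when the error indicates an unhealthy VM. A heavily condensed skeleton (the real loop sleeps on scheduler.waitq instead of spinning, and forcewake/runtime-pm handling is elided):

    while (!kthread_should_stop()) {
            workload = pick_next_workload(gvt, engine);
            if (!workload)
                    continue;       /* re-woken when work is queued */

            vgpu = workload->vgpu;

            ret = dispatch_workload(workload);
            if (!ret)
                    i915_request_wait(workload->req, 0,
                                      MAX_SCHEDULE_TIMEOUT);

            complete_current_workload(gvt, engine->id);

            if (ret && vgpu_is_vm_unhealthy(ret))
                    enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
    }
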
1228 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) in intel_gvt_wait_vgpu_idle() argument
1230 struct intel_vgpu_submission *s = &vgpu->submission; in intel_gvt_wait_vgpu_idle()
1231 struct intel_gvt *gvt = vgpu->gvt; in intel_gvt_wait_vgpu_idle()
1235 gvt_dbg_sched("wait vgpu idle\n"); in intel_gvt_wait_vgpu_idle()
1312 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1313 * @vgpu: a vGPU
1315 * This function is called when a vGPU is being destroyed.
1318 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) in intel_vgpu_clean_submission() argument
1320 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_clean_submission()
1324 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); in intel_vgpu_clean_submission()
1327 for_each_engine(engine, vgpu->gvt->gt, id) in intel_vgpu_clean_submission()
1335 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1336 * @vgpu: a vGPU
1339 	 * This function is called when a vGPU is being reset. in intel_vgpu_reset_submission()
1342 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, in intel_vgpu_reset_submission() argument
1345 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_reset_submission()
1350 intel_vgpu_clean_workloads(vgpu, engine_mask); in intel_vgpu_reset_submission()
1351 s->ops->reset(vgpu, engine_mask); in intel_vgpu_reset_submission()
1373 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1374 * @vgpu: a vGPU
1376 * This function is called when a vGPU is being created.
1382 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) in intel_vgpu_setup_submission() argument
1384 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_setup_submission()
1385 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_setup_submission()
1397 for_each_engine(engine, vgpu->gvt->gt, i) { in intel_vgpu_setup_submission()
1444 for_each_engine(engine, vgpu->gvt->gt, i) { in intel_vgpu_setup_submission()
1456 * @vgpu: a vGPU
1458 * @interface: expected vGPU virtual submission interface
1466 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, in intel_vgpu_select_submission_ops() argument
1470 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_select_submission_ops()
1471 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_select_submission_ops()
1486 s->ops->clean(vgpu, engine_mask); in intel_vgpu_select_submission_ops()
1492 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id); in intel_vgpu_select_submission_ops()
1496 ret = ops[interface]->init(vgpu, engine_mask); in intel_vgpu_select_submission_ops()
1504 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n", in intel_vgpu_select_submission_ops()
1505 vgpu->id, s->ops->name); in intel_vgpu_select_submission_ops()
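
intel_vgpu_select_submission_ops() swaps the vGPU's submission backend through a small ops vtable: tear down the active backend, then either clear the interface (interface 0) or init and install the new one. Condensed sketch, with the execlist backend as the only populated slot (s->virtual_submission_interface bookkeeping elided):

    const struct intel_vgpu_submission_ops *ops[] = {
            [INTEL_VGPU_EXECLIST_SUBMISSION] =
                    &intel_vgpu_execlist_submission_ops,
    };

    if (s->active)
            s->ops->clean(vgpu, engine_mask);

    if (interface == 0) {
            s->ops = NULL;
            s->active = false;
            gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
            return 0;
    }

    ret = ops[interface]->init(vgpu, engine_mask);
    if (ret)
            return ret;

    s->ops = ops[interface];
    s->active = true;
    gvt_dbg_core("vgpu%d: activate ops [ %s ]\n", vgpu->id, s->ops->name);
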
1511 * intel_vgpu_destroy_workload - destroy a vGPU workload
1514 	 * This function is called when destroying a vGPU workload. in intel_vgpu_destroy_workload()
1519 struct intel_vgpu_submission *s = &workload->vgpu->submission; in intel_vgpu_destroy_workload()
1542 alloc_workload(struct intel_vgpu *vgpu) in alloc_workload() argument
1544 struct intel_vgpu_submission *s = &vgpu->submission; in alloc_workload()
1559 workload->vgpu = vgpu; in alloc_workload()
1567 static void read_guest_pdps(struct intel_vgpu *vgpu, in read_guest_pdps() argument
1576 intel_gvt_read_gpa(vgpu, in read_guest_pdps()
1584 struct intel_vgpu *vgpu = workload->vgpu; in prepare_mm() local
1600 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); in prepare_mm()
1602 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); in prepare_mm()
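
prepare_mm() ties the PDP plumbing together: choose the PPGTT root-entry type from the context descriptor's addressing mode, read the guest PDPs out of the ring context, and look up or create the matching shadow mm. Sketch reconstructed from the matched lines; the addressing-mode encodings (1 = legacy 32-bit, 3 = legacy 64-bit) are assumptions from the execlist context-descriptor format:

    static int prepare_mm(struct intel_vgpu_workload *workload)
    {
            struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
            struct intel_vgpu *vgpu = workload->vgpu;
            enum intel_gvt_gtt_type root_entry_type;
            struct intel_vgpu_mm *mm;
            u64 pdps[GVT_RING_CTX_NR_PDPS];

            switch (desc->addressing_mode) {
            case 1: /* legacy 32-bit: four L3 roots */
                    root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
                    break;
            case 3: /* legacy 64-bit: one L4 (PML4) root */
                    root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
                    break;
            default:
                    gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                    return -EINVAL;
            }

            read_guest_pdps(vgpu, workload->ring_context_gpa, (void *)pdps);

            mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
            if (IS_ERR(mm))
                    return PTR_ERR(mm);

            workload->shadow_mm = mm;
            return 0;
    }
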
1614 * intel_vgpu_create_workload - create a vGPU workload
1615 * @vgpu: a vGPU
1619 * This function is called when creating a vGPU workload.
1627 intel_vgpu_create_workload(struct intel_vgpu *vgpu, in intel_vgpu_create_workload() argument
1631 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_create_workload()
1632 struct list_head *q = workload_q_head(vgpu, engine); in intel_vgpu_create_workload()
1640 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, in intel_vgpu_create_workload()
1647 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1650 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1677 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1679 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1681 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1684 if (!intel_gvt_ggtt_validate_range(vgpu, start, in intel_vgpu_create_workload()
1690 workload = alloc_workload(vgpu); in intel_vgpu_create_workload()
1704 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1706 intel_gvt_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1716 if (!intel_gvt_ggtt_validate_range(vgpu, in intel_vgpu_create_workload()
1730 if (!intel_gvt_ggtt_validate_range(vgpu, in intel_vgpu_create_workload()
1762 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); in intel_vgpu_create_workload()
1777 	 * intel_vgpu_queue_workload - Queue a vGPU workload
1783 workload_q_head(workload->vgpu, workload->engine)); in intel_vgpu_queue_workload()
1784 intel_gvt_kick_schedule(workload->vgpu->gvt); in intel_vgpu_queue_workload()
1785 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]); in intel_vgpu_queue_workload()
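
The last three hits are nearly the whole of intel_vgpu_queue_workload(): append the workload to its per-vGPU, per-engine queue, kick the GVT scheduler, and wake the engine's workload thread. Reassembled (the list_add_tail() line is assumed from context):

    void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
    {
            list_add_tail(&workload->list,
                          workload_q_head(workload->vgpu, workload->engine));
            intel_gvt_kick_schedule(workload->vgpu->gvt);
            wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
    }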