Lines Matching full:vgpu
79 /* GM resources owned by a vGPU */
89 /* Fences owned by a vGPU */
112 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
125 #define vgpu_opregion(vgpu) (&(vgpu->opregion))
145 int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
146 void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
147 void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
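These three callbacks form the per-vGPU submission vtable (struct intel_vgpu_submission_ops in this header). A minimal sketch of how a caller might drive it, assuming the i915 ALL_ENGINES mask; the helper name and error handling are illustrative, not code from this file:

	static int sketch_setup_submission(struct intel_vgpu *vgpu,
					   const struct intel_vgpu_submission_ops *ops)
	{
		/* Bring up submission state for every engine at once. */
		int ret = ops->init(vgpu, ALL_ENGINES);

		if (ret)
			return ret;
		/* A later engine reset goes through ops->reset(vgpu, mask);
		 * teardown mirrors init via ops->clean(vgpu, ALL_ENGINES). */
		return 0;
	}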
193 * scheduler structure. So the two vGPU scheduling fields below are protected by the gvt lock.
306 * A vGPU with a weight of 8 will get twice as much GPU time as a vGPU with
307 * a weight of 4 on a contended host; each vGPU type carries its own weight.
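A hedged sketch of the proportional-share arithmetic the comment describes; the helper and its period parameter are assumptions for illustration only:

	#include <linux/math64.h>

	/* Weights 8 vs. 4: the weight-8 vGPU receives 8/(8+4) = 2/3 of a
	 * contended scheduling period, twice the weight-4 vGPU's 1/3. */
	static u64 sketch_timeslice_ns(u32 weight, u32 total_weight, u64 period_ns)
	{
		return div_u64(period_ns * weight, total_weight);
	}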
323 * not yet protected by dedicated locks (the vgpu and scheduler locks).
326 /* scheduler-scope lock: protects gvt and vgpu scheduling data */
330 struct idr vgpu_idr; /* vGPU IDR pool */
376 /* per-vGPU vblank emulation request */
421 /* Aperture/GM space definitions for vGPU */
422 #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
423 #define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
424 #define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
425 #define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
427 #define vgpu_aperture_pa_base(vgpu) \
428 	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
430 #define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
432 #define vgpu_aperture_pa_end(vgpu) \
433 	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
435 #define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
436 #define vgpu_aperture_gmadr_end(vgpu) \
437 	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
439 #define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
440 #define vgpu_hidden_gmadr_end(vgpu) \
441 	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
443 #define vgpu_fence_sz(vgpu) (vgpu->fence.size)
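To make the two address ranges concrete, a hypothetical helper that turns a vGPU-relative aperture offset into a host physical address using only the macros above:

	static u64 sketch_aperture_off_to_pa(struct intel_vgpu *vgpu, u64 off)
	{
		/* Stay inside this vGPU's slice of the mappable aperture. */
		if (off >= vgpu_aperture_sz(vgpu))
			return 0;	/* illustrative error handling */
		return vgpu_aperture_pa_base(vgpu) + off;
	}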
448 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, const struct intel_vgpu_config *conf);
450 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
451 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
452 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value);
455 /* Macros for easily accessing vGPU virtual/shadow registers; use the _t variants for typed i915_reg_t registers and the plain variants for raw offsets. */
457 #define vgpu_vreg_t(vgpu, reg) \
458 	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
459 #define vgpu_vreg(vgpu, offset) \
460 	(*(u32 *)(vgpu->mmio.vreg + (offset)))
461 #define vgpu_vreg64_t(vgpu, reg) \
462 	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
463 #define vgpu_vreg64(vgpu, offset) \
464 	(*(u64 *)(vgpu->mmio.vreg + (offset)))
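The split is: the _t macros take a typed i915_reg_t, the plain macros a raw byte offset, and both alias the same virtual register page. A small illustrative read-modify-write (helper name assumed):

	static void sketch_vreg_clear_bit0(struct intel_vgpu *vgpu, u32 offset)
	{
		u32 val = vgpu_vreg(vgpu, offset);

		vgpu_vreg(vgpu, offset) = val & ~BIT(0);
	}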
466 #define for_each_active_vgpu(gvt, vgpu, id) \
467 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
468 		for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
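Typical iterator usage, sketched; taking gvt->lock around the IDR walk is an assumption based on the GVT-scope lock documented above:

	static void sketch_deactivate_all(struct intel_gvt *gvt)
	{
		struct intel_vgpu *vgpu;
		int id;

		mutex_lock(&gvt->lock);		/* assumed to guard vgpu_idr */
		for_each_active_vgpu(gvt, vgpu, id)
			intel_gvt_deactivate_vgpu(vgpu);
		mutex_unlock(&gvt->lock);
	}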
470 static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, u32 offset, u32 val, bool low)
477 	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
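The low-dword special-casing in this function exists because guests size BARs by writing all-ones and reading the value back; a hedged sketch of that readback arithmetic (helper hypothetical, mask from <uapi/linux/pci_regs.h>):

	static u32 sketch_bar_size(u32 readback)
	{
		/* Invert the writable address bits to recover the BAR size. */
		return ~(readback & PCI_BASE_ADDRESS_MEM_MASK) + 1;
	}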
494 void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
495 int intel_gvt_create_vgpu(struct intel_vgpu *vgpu, const struct intel_vgpu_config *conf);
497 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
498 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
499 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_engine_mask_t engine_mask);
501 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
502 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
503 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
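Read together, the declarations above imply a create -> activate -> reset -> deactivate -> release -> destroy ordering. A sketch only; the wrapper is hypothetical and conf follows the create signature shown:

	static int sketch_vgpu_lifecycle(struct intel_vgpu *vgpu,
					 const struct intel_vgpu_config *conf)
	{
		int ret = intel_gvt_create_vgpu(vgpu, conf);

		if (ret)
			return ret;
		intel_gvt_activate_vgpu(vgpu);
		intel_gvt_reset_vgpu(vgpu);	/* full device-model-level reset */
		intel_gvt_deactivate_vgpu(vgpu);
		intel_gvt_release_vgpu(vgpu);
		intel_gvt_destroy_vgpu(vgpu);
		return 0;
	}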
505 int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
506 int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
509 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
510 	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
511 	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
513 #define vgpu_gmadr_is_hidden(vgpu, gmadr) \
514 	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
515 	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
517 #define vgpu_gmadr_is_valid(vgpu, gmadr) \
518 	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
519 	 (vgpu_gmadr_is_hidden(vgpu, gmadr))))
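These predicates pair naturally with the guest-to-host translators declared below; a hedged sketch of the usual validate-then-translate pattern (wrapper hypothetical):

	static int sketch_gmadr_g2h(struct intel_vgpu *vgpu, u64 gmadr, u64 *h_addr)
	{
		/* Reject anything outside both the aperture and hidden ranges. */
		if (!vgpu_gmadr_is_valid(vgpu, gmadr))
			return -EINVAL;
		return intel_gvt_ggtt_gmadr_g2h(vgpu, gmadr, h_addr);
	}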
533 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
534 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
535 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
536 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, unsigned long *h_index);
538 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index);
541 void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, bool primary);
543 void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
545 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes);
548 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes);
551 void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
553 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
556 	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) & PCI_BASE_ADDRESS_MEM_MASK;
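A sketch of how this helper might be used to turn a guest physical MMIO address into a BAR-relative offset; PCI_BASE_ADDRESS_0 is the standard config-space index, the rest is assumed:

	static u64 sketch_mmio_offset(struct intel_vgpu *vgpu, u64 guest_pa)
	{
		u64 bar_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

		/* Caller is assumed to have verified the access hits this BAR. */
		return guest_pa - bar_gpa;
	}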
560 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
561 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
562 int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
564 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
565 void populate_pvinfo_page(struct intel_vgpu *vgpu);
568 void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
569 void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);
683 void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
715 * @vgpu: a vGPU
723 static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len)
726 	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
727 		return -ESRCH;
728 	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
733 * @vgpu: a vGPU
741 static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len)
744 	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
745 		return -ESRCH;
746 	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
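Usage sketch for the two accessors, a guest-physical read-modify-write; everything except the accessors themselves is illustrative:

	static int sketch_gpa_set_bit0(struct intel_vgpu *vgpu, unsigned long gpa)
	{
		u32 val;
		int ret = intel_gvt_read_gpa(vgpu, gpa, &val, sizeof(val));

		if (ret)
			return ret;	/* -ESRCH when the vGPU is not attached */
		val |= BIT(0);
		return intel_gvt_write_gpa(vgpu, gpa, &val, sizeof(val));
	}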
749 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
755 int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
756 int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr);
758 void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);