Lines Matching refs:kvm_vm
53 struct kvm_vm *vm;
77 struct kvm_vm { struct
144 memslot2region(struct kvm_vm *vm, uint32_t memslot);
146 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region()
293 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm()
356 static inline int vm_check_cap(struct kvm_vm *vm, long cap) in vm_check_cap()
364 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in __vm_enable_cap()
370 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in vm_enable_cap()
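
These declarations are from the KVM selftests' library header (kvm_util.h, judging by the content). As a first illustrative sketch of the capability helpers just above, assuming an existing `vm` handle; the cap and hypercall names are real KVM UAPI, and the pattern mirrors the x86 private-memory tests:

    /* Skip the test unless the kernel can mark guest memory private. */
    TEST_REQUIRE(vm_check_cap(vm, KVM_CAP_MEMORY_ATTRIBUTES) &
                 KVM_MEMORY_ATTRIBUTE_PRIVATE);
    /* Opt in to MAP_GPA_RANGE hypercalls exiting to userspace. */
    vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT_ULL(KVM_HC_MAP_GPA_RANGE));
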
377 static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, in vm_set_memory_attributes()
398 static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_private()
404 static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_shared()
410 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
413 static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_punch_hole()
419 static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_allocate()
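
A sketch of the private/shared attribute wrappers above, assuming `gpa` lies in a guest_memfd-backed memslot; the 2MiB size (SZ_2M, from <linux/sizes.h>) is illustrative:

    vm_mem_set_private(vm, gpa, SZ_2M);      /* subsequent shared accesses fault */
    vm_mem_set_shared(vm, gpa, SZ_2M);       /* ...and convert back again */
    vm_guest_mem_punch_hole(vm, gpa, SZ_2M); /* drop the backing pages via fallocate() */
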
425 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
428 void kvm_vm_free(struct kvm_vm *vmp);
429 void kvm_vm_restart(struct kvm_vm *vmp);
430 void kvm_vm_release(struct kvm_vm *vmp);
431 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
434 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
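
The lifecycle helpers pair up naturally; a sketch assuming a fully constructed `vm` (program_invocation_name is the GNU global the selftests themselves pass to the ELF loader):

    kvm_vm_elf_load(vm, program_invocation_name); /* map the test binary into the guest */
    vm_dump(stderr, vm, 2 /* indent */);          /* debug dump of memslots and mappings */
    kvm_vm_free(vm);                              /* tear down regions, vCPUs, and fds */
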
436 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log()
443 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log()
456 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) in kvm_vm_reset_dirty_ring()
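
One get/clear cycle against a memslot's dirty bitmap, as a sketch; `slot` and `host_num_pages` are assumed to describe an existing memslot, and bitmap_zalloc() is the tools/include helper:

    unsigned long *bmap = bitmap_zalloc(host_num_pages);

    kvm_vm_get_dirty_log(vm, slot, bmap);
    kvm_vm_clear_dirty_log(vm, slot, bmap, 0 /* first_page */, host_num_pages);
    free(bmap);
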
461 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, in kvm_vm_register_coalesced_io()
474 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, in kvm_vm_unregister_coalesced_io()
487 static inline int vm_get_stats_fd(struct kvm_vm *vm) in vm_get_stats_fd()
534 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
537 static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name) in vm_get_stat()
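
Reading a single binary-stats value through the wrapper above is then a one-liner; "nx_lpage_splits" is a real x86 VM stat (used by the nx_huge_pages test):

    uint64_t splits = vm_get_stat(vm, "nx_lpage_splits");
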
545 void vm_create_irqchip(struct kvm_vm *vm);
547 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in __vm_create_guest_memfd()
558 static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in vm_create_guest_memfd()
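
A sketch tying guest_memfd creation to a private memslot; `gpa` and `slot` are illustrative, and the trailing fd/offset parameters of vm_mem_add() are an assumption based on the guest_memfd selftests:

    int gmem_fd = vm_create_guest_memfd(vm, SZ_2M, 0 /* flags */);

    /* Signature tail (guest_memfd fd + offset) assumed, not shown above. */
    vm_mem_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, slot, SZ_2M / vm->page_size,
               KVM_MEM_GUEST_MEMFD, gmem_fd, 0 /* guest_memfd offset */);
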
567 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
569 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
571 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
574 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
578 void vm_userspace_mem_region_add(struct kvm_vm *vm,
582 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
587 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) in vm_arch_has_protected_memory()
593 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
594 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
595 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
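
The memslot manipulators above compose as follows; slot number, GPAs, and page count are all illustrative:

    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                0x10000000 /* guest_paddr */, 10 /* slot */,
                                64 /* npages */, 0 /* flags */);
    vm_mem_region_set_flags(vm, 10, KVM_MEM_LOG_DIRTY_PAGES);
    vm_mem_region_move(vm, 10, 0x20000000 /* new_gpa */);
    vm_mem_region_delete(vm, 10);
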
596 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
597 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
598 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
599 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
600 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
602 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
605 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
606 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
608 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
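
Guest-virtual allocation, sketched; KVM_UTIL_MIN_VADDR is the same floor the library uses internally, and the 16-page size is arbitrary:

    vm_vaddr_t buf  = vm_vaddr_alloc(vm, 16 * vm->page_size, KVM_UTIL_MIN_VADDR);
    vm_vaddr_t page = vm_vaddr_alloc_page(vm);
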
610 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
612 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
613 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
614 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
615 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
621 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) in vm_untag_gpa()
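
The translation helpers let the host read and write guest memory directly; a sketch assuming `gva` is already mapped in the guest:

    uint64_t *hva = addr_gva2hva(vm, gva);

    *hva = 0xcafe; /* the guest observes 0xcafe at 'gva' */
    TEST_ASSERT(addr_hva2gpa(vm, hva) == addr_gva2gpa(vm, gva),
                "HVA->GPA and GVA->GPA should agree");
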
821 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
822 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
824 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) in kvm_create_device()
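
Probe-then-create for an in-kernel device, sketched with the arm64 vGICv3 type; treating a zero return from the probe as "supported" is an assumption here:

    int vgic_fd = -1;

    /* Probe availability without creating, then create for real. */
    if (!__kvm_test_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3))
        vgic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
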
853 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
854 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
861 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
862 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
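
Injecting a level-triggered interrupt, assuming an in-kernel irqchip was set up via vm_create_irqchip() above (`irq` is illustrative):

    kvm_irq_line(vm, irq, 1); /* assert; the non-underscore variant asserts on error */
    kvm_irq_line(vm, irq, 0); /* deassert */
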
866 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
868 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
871 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
873 static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc()
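
Physical allocation, sketched; the minimum GPA and memslot 0 are illustrative, and page-table pages come from the library's dedicated helper:

    vm_paddr_t data_gpa = vm_phy_page_alloc(vm, 0x100000 /* paddr_min */,
                                            0 /* memslot */);
    vm_paddr_t pt_gpa   = vm_alloc_page_table(vm);
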
891 struct kvm_vm *____vm_create(struct vm_shape shape);
892 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
895 static inline struct kvm_vm *vm_create_barebones(void) in vm_create_barebones()
900 static inline struct kvm_vm *vm_create_barebones_type(unsigned long type) in vm_create_barebones_type()
910 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus) in vm_create()
915 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
919 static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, in vm_create_with_vcpus()
928 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
937 static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, in __vm_create_with_one_vcpu()
945 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, in vm_create_with_one_vcpu()
951 static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape, in vm_create_shape_with_one_vcpu()
958 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
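
Putting the factory functions together, the canonical single-vCPU skeleton; guest_main is a hypothetical guest function, and GUEST_DONE()/vcpu_run() are the selftests' usual ucall and run wrappers:

    static void guest_main(void)
    {
        GUEST_DONE();
    }

    int main(void)
    {
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_main);

        vcpu_run(vcpu); /* run until the guest's GUEST_DONE() ucall */
        kvm_vm_free(vm);
        return 0;
    }
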
965 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
1022 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1025 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, in vm_vcpu_add()
1036 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
1038 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, in vm_vcpu_recreate()
1046 void virt_arch_pgd_alloc(struct kvm_vm *vm);
1048 static inline void virt_pgd_alloc(struct kvm_vm *vm) in virt_pgd_alloc()
1069 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
1071 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_pg_map()
1092 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1094 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa()
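
A sketch of a single-page mapping plus a translation check; the addresses are illustrative and page-aligned:

    virt_pg_map(vm, 0x40000000 /* vaddr */, 0x1000000 /* paddr */);
    TEST_ASSERT(addr_gva2gpa(vm, 0x40000000) == 0x1000000,
                "translation should match the fresh mapping");
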
1114 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
1116 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_dump()
1122 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) in __vm_disable_nx_huge_pages()
1134 void kvm_arch_vm_post_create(struct kvm_vm *vm);
1136 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);