Lines Matching refs:kvm
(Each entry reads: <source line number>  <matched source line>  in <enclosing function>  <usage>, where the trailing "argument" or "local" marks whether the matched kvm is a function argument or a local variable.)

26 static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx)  in kvm_ptw_prepare()  argument
28 ctx->level = kvm->arch.root_level; in kvm_ptw_prepare()
30 ctx->invalid_ptes = kvm->arch.invalid_ptes; in kvm_ptw_prepare()
31 ctx->pte_shifts = kvm->arch.pte_shifts; in kvm_ptw_prepare()
34 ctx->opaque = kvm; in kvm_ptw_prepare()
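Taken together, the matches above cover most of kvm_ptw_prepare(); reassembled, with any fields not visible in the matches left out, the walk-context setup looks roughly like this (a reconstruction from the fragments, not the verbatim source):

    static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx)
    {
            ctx->level        = kvm->arch.root_level;    /* start the walk at the root level  */
            ctx->invalid_ptes = kvm->arch.invalid_ptes;  /* per-level "empty entry" templates */
            ctx->pte_shifts   = kvm->arch.pte_shifts;    /* per-level index shifts            */
            /* ... fields derived from the above are omitted here ... */
            ctx->opaque       = kvm;                     /* handed back to the walk callbacks */
    }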
90 struct kvm *kvm; in kvm_flush_pte() local
92 kvm = ctx->opaque; in kvm_flush_pte()
94 kvm->stat.hugepages--; in kvm_flush_pte()
96 kvm->stat.pages--; in kvm_flush_pte()
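The kvm_flush_pte() matches show only the statistics update: whichever entry is being flushed, the VM-wide page counters are adjusted depending on whether it mapped a huge page. A minimal sketch around those two lines (the signature and the huge-page test are assumptions, and the actual entry invalidation is elided):

    static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
    {
            struct kvm *kvm;

            kvm = ctx->opaque;                  /* the struct kvm stashed by kvm_ptw_prepare()  */
            if (ctx->level)                     /* assumed: non-zero level means a huge mapping */
                    kvm->stat.hugepages--;
            else
                    kvm->stat.pages--;

            /* ... invalidate *pte here in the real code ... */
            return 1;                           /* assumed: tell the walker something changed   */
    }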
149 static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, in kvm_populate_gpa() argument
156 kvm_ptw_prepare(kvm, &ctx); in kvm_populate_gpa()
157 child = kvm->arch.pgd; in kvm_populate_gpa()
292 static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock) in kvm_flush_range() argument
300 kvm_ptw_prepare(kvm, &ctx); in kvm_flush_range()
304 spin_lock(&kvm->mmu_lock); in kvm_flush_range()
305 ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, in kvm_flush_range()
307 spin_unlock(&kvm->mmu_lock); in kvm_flush_range()
309 ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, in kvm_flush_range()
314 kvm_flush_remote_tlbs(kvm); in kvm_flush_range()
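Read in order, the kvm_flush_range() matches give the whole control flow: prepare a walk context, walk kvm->arch.pgd over the gfn range (taking kvm->mmu_lock only when the lock argument says so), and flush remote TLBs only if the walk reports that mappings were actually dropped. A reconstruction from those fragments (the ctx.ops/flag setup is not in the matches and is left as a comment):

    static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock)
    {
            int ret;
            kvm_ptw_ctx ctx;

            /* ctx.ops / ctx.flag initialization omitted (not shown in the matches) */
            kvm_ptw_prepare(kvm, &ctx);

            if (lock) {
                    spin_lock(&kvm->mmu_lock);
                    ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
                                      end_gfn << PAGE_SHIFT, &ctx);
                    spin_unlock(&kvm->mmu_lock);
            } else {
                    ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
                                      end_gfn << PAGE_SHIFT, &ctx);
            }

            /* flush remote TLBs only when something was actually removed */
            if (ret)
                    kvm_flush_remote_tlbs(kvm);
    }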
341 static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in kvm_mkclean_gpa_pt() argument
347 kvm_ptw_prepare(kvm, &ctx); in kvm_mkclean_gpa_pt()
348 return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx); in kvm_mkclean_gpa_pt()
362 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
374 kvm_ptw_prepare(kvm, &ctx); in kvm_arch_mmu_enable_log_dirty_pt_masked()
376 kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); in kvm_arch_mmu_enable_log_dirty_pt_masked()
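For kvm_arch_mmu_enable_log_dirty_pt_masked(), only the context setup and the top-level walk appear in the matches; the start/end computation from the dirty bitmap mask is the conventional pattern shared with other architectures and is shown here purely as an assumption:

    /* Sketch; gfn_offset and mask come from the generic prototype of this hook,
     * and the __ffs()/__fls() arithmetic is the usual pattern, not taken from
     * the matches. */
    kvm_ptw_ctx ctx;
    gfn_t base_gfn = slot->base_gfn + gfn_offset;
    gfn_t start = base_gfn + __ffs(mask);      /* first gfn with a set bit in the mask */
    gfn_t end = base_gfn + __fls(mask) + 1;    /* one past the last gfn with a set bit */

    /* ctx.ops presumably points at a write-protect (mkclean) callback */
    kvm_ptw_prepare(kvm, &ctx);
    kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx);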
379 int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, in kvm_arch_prepare_memory_region() argument
392 if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
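The single match in kvm_arch_prepare_memory_region() is a bounds check: a new slot must fit within the guest physical address space described by kvm->arch.gpa_size. In context it amounts to something like this (only the check itself is in the match; the error code is an assumption):

    /* reject a slot that extends past the guest physical address space */
    if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT))
            return -ENOMEM;    /* assumed error code */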
442 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
474 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) in kvm_arch_commit_memory_region()
477 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
479 needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
481 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
483 kvm_flush_remote_tlbs(kvm); in kvm_arch_commit_memory_region()
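The kvm_arch_commit_memory_region() matches sketch the dirty-logging enable path: unless userspace opted into manual protection with the initial-all-set mode, the slot is write-protected under kvm->mmu_lock via kvm_mkclean_gpa_pt(), and remote TLBs are flushed if any entry changed. Reassembled, with the triggering condition and the exact range bound marked as assumptions:

    int needs_flush;

    if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {     /* assumed: the real check is likely narrower
                                                       (dirty logging newly enabled)             */
            /* nothing to do if userspace clears write protection itself */
            if (kvm_dirty_log_manual_protect_and_init_set(kvm))
                    return;

            spin_lock(&kvm->mmu_lock);
            /* write-protect the slot so future writes fault and get logged */
            needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
                                             new->base_gfn + new->npages);  /* end bound assumed */
            spin_unlock(&kvm->mmu_lock);

            if (needs_flush)
                    kvm_flush_remote_tlbs(kvm);
    }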
487 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
489 kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0); in kvm_arch_flush_shadow_all()
492 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_arch_flush_shadow_memslot() argument
498 kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1); in kvm_arch_flush_shadow_memslot()
501 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range() argument
507 kvm_ptw_prepare(kvm, &ctx); in kvm_unmap_gfn_range()
510 return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, in kvm_unmap_gfn_range()
514 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn() argument
520 kvm_ptw_prepare(kvm, &ctx); in kvm_age_gfn()
522 return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, in kvm_age_gfn()
526 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn() argument
529 kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); in kvm_test_age_gfn()
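The three gfn-range notifier hooks follow the same shape: kvm_unmap_gfn_range() and kvm_age_gfn() prepare a context and walk the range, while kvm_test_age_gfn() only needs a single lookup. A sketch of the test-age case, where kvm_pte_young() is an assumed accessor name (only the kvm_populate_gpa() lookup comes from the matches):

    bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            gpa_t gpa = range->start << PAGE_SHIFT;
            kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);  /* lookup only, no allocation */

            /* report whether the existing mapping was accessed since the last aging pass */
            return ptep && kvm_pte_young(*ptep);    /* kvm_pte_young() assumed, not in the matches */
    }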
558 struct kvm *kvm = vcpu->kvm; in kvm_map_page_fast() local
562 spin_lock(&kvm->mmu_lock); in kvm_map_page_fast()
565 ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); in kvm_map_page_fast()
586 slot = gfn_to_memslot(kvm, gfn); in kvm_map_page_fast()
605 spin_unlock(&kvm->mmu_lock); in kvm_map_page_fast()
612 mark_page_dirty(kvm, gfn); in kvm_map_page_fast()
620 spin_unlock(&kvm->mmu_lock); in kvm_map_page_fast()
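The kvm_map_page_fast() matches outline the lock-protected fast path: look up the existing pte without allocating, consult the memslot for dirty-logging constraints, and, on a successful write, mark the page dirty after dropping the lock. A skeleton reconstructed from those lines, with the pte bit handling elided and the fallback return value assumed:

    static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
    {
            int ret = 0;
            gfn_t gfn = gpa >> PAGE_SHIFT;
            struct kvm *kvm = vcpu->kvm;
            struct kvm_memory_slot *slot;
            kvm_pte_t *ptep;

            spin_lock(&kvm->mmu_lock);

            /* the fast path only works on an already-populated pte: no allocation */
            ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
            if (!ptep) {
                    spin_unlock(&kvm->mmu_lock);
                    return -EFAULT;                 /* assumed: signals "take the slow path" */
            }

            /* ... check present/write/accessed bits against the fault type ... */

            slot = gfn_to_memslot(kvm, gfn);
            /* ... respect dirty-logging restrictions for this slot ... */

            spin_unlock(&kvm->mmu_lock);

            if (write)
                    mark_page_dirty(kvm, gfn);      /* real code only logs when the pte was newly dirtied */

            return ret;
    }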
682 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level() argument
717 pgd = pgdp_get(pgd_offset(kvm->mm, hva)); in host_pfn_mapping_level()
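host_pfn_mapping_level() has a single match, but a telling one: it walks the host page tables of kvm->mm (not the guest's GPA tables) for the hva backing the gfn, to learn whether the host itself uses a huge mapping there. A heavily condensed sketch; the hva derivation, the memslot parameter name, and the descent below the pgd are assumptions:

    pgd_t pgd;
    unsigned long hva = __gfn_to_hva_memslot(slot, gfn);    /* assumed helper; slot is the memslot parameter */

    /* lockless read of the host pgd entry covering this hva */
    pgd = pgdp_get(pgd_offset(kvm->mm, hva));
    if (pgd_none(pgd))
            return 0;                                       /* only base pages possible */

    /* ... descend through p4d/pud/pmd and return a larger level when the
     *     host maps this range with a huge page ... */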
748 struct kvm *kvm = vcpu->kvm; in kvm_split_huge() local
763 kvm->stat.hugepages--; in kvm_split_huge()
764 kvm->stat.pages += PTRS_PER_PTE; in kvm_split_huge()
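kvm_split_huge()'s matches capture the accounting when one huge mapping is broken up into a full table of base pages: the hugepage counter drops by one and the page counter grows by PTRS_PER_PTE, one for each newly created pte. Around those lines (the table allocation and fan-out are elided):

    struct kvm *kvm = vcpu->kvm;

    /* ... allocate a child page table and populate PTRS_PER_PTE base-page
     *     entries covering the range of the old huge mapping ... */

    kvm->stat.hugepages--;                  /* one huge mapping gone                  */
    kvm->stat.pages += PTRS_PER_PTE;        /* replaced by PTRS_PER_PTE base mappings */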
796 struct kvm *kvm = vcpu->kvm; in kvm_map_page() local
801 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_map_page()
806 memslot = gfn_to_memslot(kvm, gfn); in kvm_map_page()
823 mmu_seq = kvm->mmu_invalidate_seq; in kvm_map_page()
838 pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable); in kvm_map_page()
845 spin_lock(&kvm->mmu_lock); in kvm_map_page()
846 if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) { in kvm_map_page()
852 spin_unlock(&kvm->mmu_lock); in kvm_map_page()
883 level = host_pfn_mapping_level(kvm, gfn, memslot); in kvm_map_page()
890 ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); in kvm_map_page()
902 ptep = kvm_populate_gpa(kvm, memcache, gpa, level); in kvm_map_page()
912 ++kvm->stat.hugepages; in kvm_map_page()
916 ++kvm->stat.pages; in kvm_map_page()
918 spin_unlock(&kvm->mmu_lock); in kvm_map_page()
921 mark_page_dirty_in_slot(kvm, memslot, gfn); in kvm_map_page()
927 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_map_page()
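The slow-path matches in kvm_map_page() trace the canonical KVM fault sequence: enter SRCU, resolve the memslot, snapshot kvm->mmu_invalidate_seq before translating gfn to pfn, retake mmu_lock and bail out if an invalidation raced in, choose a mapping level from the host page tables, install the pte through kvm_populate_gpa(), bump the page counters, and mark the page dirty. A condensed skeleton reconstructed from those lines; error handling, pte construction, and the memory-cache plumbing are elided or marked as assumptions:

    static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
    {
            int srcu_idx, err = 0, level;
            struct kvm *kvm = vcpu->kvm;
            gfn_t gfn = gpa >> PAGE_SHIFT;
            struct kvm_memory_slot *memslot;
            unsigned long mmu_seq;
            kvm_pfn_t pfn;
            bool writeable;
            kvm_pte_t *ptep;

            srcu_idx = srcu_read_lock(&kvm->srcu);

            memslot = gfn_to_memslot(kvm, gfn);
            /* ... bail out if there is no slot or a write hits a read-only slot ... */

            /* snapshot before translating so a racing invalidation is caught below */
            mmu_seq = kvm->mmu_invalidate_seq;
            smp_rmb();                                  /* assumed barrier, as in other ports */

            pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);

            spin_lock(&kvm->mmu_lock);
            if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                    spin_unlock(&kvm->mmu_lock);
                    goto out;                           /* real code releases the pfn and retries */
            }

            level = host_pfn_mapping_level(kvm, gfn, memslot);
            ptep = kvm_populate_gpa(kvm, NULL, gpa, level);   /* real code passes a pte memcache,
                                                                 not NULL (line 902 of the matches) */
            /* ... build the new pte from pfn/level/write and store it via ptep ... */

            if (level)                                  /* assumed: non-zero level == huge mapping */
                    ++kvm->stat.hugepages;
            else
                    ++kvm->stat.pages;
            spin_unlock(&kvm->mmu_lock);

            mark_page_dirty_in_slot(kvm, memslot, gfn);
    out:
            srcu_read_unlock(&kvm->srcu, srcu_idx);
            return err;
    }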
946 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
950 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, in kvm_arch_flush_remote_tlbs_memslot() argument
953 kvm_flush_remote_tlbs(kvm); in kvm_arch_flush_remote_tlbs_memslot()