Lines matching refs:kvm in KVM's x86 TDP MMU (arch/x86/kvm/mmu/tdp_mmu.c). Each entry lists the source line number, the matching code, the enclosing function, and whether kvm appears on that line as an argument or a local variable.

15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu() argument
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu()
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); in kvm_mmu_init_tdp_mmu()
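
The two init lines above (17-18) set up the per-VM TDP MMU state. Below is a minimal sketch of the fields involved, using a trimmed-down, hypothetical stand-in for struct kvm_arch; the field names come from this listing, but the surrounding layout and the atomic64_set() are illustrative only.

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/atomic.h>

    /* Hypothetical, reduced stand-in for the TDP MMU bits of struct kvm_arch. */
    struct tdp_mmu_arch_sketch {
            struct list_head tdp_mmu_roots;   /* RCU-protected list of root pages */
            spinlock_t tdp_mmu_pages_lock;    /* guards root-list and NX-page updates */
            atomic64_t tdp_mmu_pages;         /* count of TDP MMU shadow pages */
    };

    static void tdp_mmu_init_sketch(struct tdp_mmu_arch_sketch *arch)
    {
            INIT_LIST_HEAD(&arch->tdp_mmu_roots);
            spin_lock_init(&arch->tdp_mmu_pages_lock);
            atomic64_set(&arch->tdp_mmu_pages, 0);  /* assumption: zeroed elsewhere in the real code */
    }
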
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held() argument
26 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
28 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
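
Lines 26 and 28 are the two arms of the lock-assertion helper: shared walkers must hold mmu_lock for read, exclusive walkers for write. A hedged reconstruction of its shape (the exact body in the kernel may differ slightly):

    static __always_inline bool kvm_lockdep_assert_mmu_lock_held_sketch(struct kvm *kvm,
                                                                        bool shared)
    {
            if (shared)
                    lockdep_assert_held_read(&kvm->mmu_lock);   /* line 26 */
            else
                    lockdep_assert_held_write(&kvm->mmu_lock);  /* line 28 */

            /* Returning true lets the helper sit inside iterator macros. */
            return true;
    }
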
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu() argument
40 kvm_tdp_mmu_invalidate_all_roots(kvm); in kvm_mmu_uninit_tdp_mmu()
41 kvm_tdp_mmu_zap_invalidated_roots(kvm); in kvm_mmu_uninit_tdp_mmu()
43 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages)); in kvm_mmu_uninit_tdp_mmu()
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); in kvm_mmu_uninit_tdp_mmu()
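
The uninit path (lines 40-44) invalidates and zaps every root, then asserts that nothing is left behind. A sketch of that ordering, with comments added; any other teardown steps in the real function are omitted here.

    void kvm_mmu_uninit_tdp_mmu_sketch(struct kvm *kvm)
    {
            /* Mark every root invalid so no new references can be taken... */
            kvm_tdp_mmu_invalidate_all_roots(kvm);
            /* ...then actually tear the invalidated roots down. */
            kvm_tdp_mmu_zap_invalidated_roots(kvm);

            /* By now every TDP MMU shadow page and root should be gone. */
            WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
            WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
    }
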
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) in kvm_tdp_mmu_put_root() argument
86 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm); in kvm_tdp_mmu_put_root()
88 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_put_root()
90 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_put_root()
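
kvm_tdp_mmu_put_root() (lines 76-90) drops a reference and, on the final put, unlinks the root from tdp_mmu_roots under tdp_mmu_pages_lock. A sketch of that pattern; only the KVM_BUG_ON and the lock/unlock pair appear in the listing, so the refcount field name and the RCU free callback below are assumptions.

    void kvm_tdp_mmu_put_root_sketch(struct kvm *kvm, struct kvm_mmu_page *root)
    {
            if (!refcount_dec_and_test(&root->tdp_mmu_root_count))  /* field name assumed */
                    return;

            /* Only invalid TDP MMU roots should ever see a final put (line 86). */
            KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

            spin_lock(&kvm->arch.tdp_mmu_pages_lock);
            list_del_rcu(&root->link);
            spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

            /* Free after a grace period; the callback name is hypothetical. */
            call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
    }
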
104 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root() argument
114 lockdep_assert_held(&kvm->mmu_lock); in tdp_mmu_next_root()
119 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
123 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
131 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
138 kvm_tdp_mmu_put_root(kvm, prev_root); in tdp_mmu_next_root()
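
tdp_mmu_next_root() (lines 104-138) walks the RCU-protected root list, taking a reference on the next root before dropping the reference on the previous one, which is what makes the "yield safe" iterators possible. A condensed sketch of the walk; the reference-taking helper and the exact skip logic are assumptions.

    static struct kvm_mmu_page *tdp_mmu_next_root_sketch(struct kvm *kvm,
                                                         struct kvm_mmu_page *prev_root)
    {
            struct kvm_mmu_page *next_root;

            lockdep_assert_held(&kvm->mmu_lock);        /* line 114 */

            rcu_read_lock();

            if (prev_root)
                    next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                      &prev_root->link,
                                                      typeof(*prev_root), link);
            else
                    next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                       typeof(*next_root), link);

            /* Skip roots whose refcount already hit zero (helper name assumed). */
            while (next_root && !kvm_tdp_mmu_get_root(next_root))
                    next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                      &next_root->link,
                                                      typeof(*next_root), link);

            rcu_read_unlock();

            if (prev_root)
                    kvm_tdp_mmu_put_root(kvm, prev_root);   /* line 138 */

            return next_root;
    }
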
231 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_alloc_root() local
240 read_lock(&kvm->mmu_lock); in kvm_tdp_mmu_alloc_root()
242 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) { in kvm_tdp_mmu_alloc_root()
247 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_alloc_root()
257 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { in kvm_tdp_mmu_alloc_root()
274 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots); in kvm_tdp_mmu_alloc_root()
277 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_alloc_root()
279 read_unlock(&kvm->mmu_lock); in kvm_tdp_mmu_alloc_root()
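
kvm_tdp_mmu_alloc_root() (lines 231-279) runs with mmu_lock held only for read and serializes root insertion with tdp_mmu_pages_lock: it first looks for a reusable valid root, then re-checks the list under the spinlock before linking a freshly allocated one. A compressed sketch of that check-then-insert step; the role comparison is an assumption, while the locking and list calls mirror lines 247-277.

    static struct kvm_mmu_page *tdp_mmu_install_root_sketch(struct kvm *kvm,
                                                            struct kvm_mmu_page *new_root,
                                                            union kvm_mmu_page_role role)
    {
            struct kvm_mmu_page *root;

            spin_lock(&kvm->arch.tdp_mmu_pages_lock);

            /* Re-check under the spinlock: another vCPU may have installed a matching root. */
            list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
                    if (root->role.word == role.word) {     /* matching test assumed */
                            spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
                            return root;
                    }
            }

            list_add_rcu(&new_root->link, &kvm->arch.tdp_mmu_roots);
            spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
            return new_root;
    }
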
290 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
294 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_account_mmu_page() argument
297 atomic64_inc(&kvm->arch.tdp_mmu_pages); in tdp_account_mmu_page()
300 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_unaccount_mmu_page() argument
303 atomic64_dec(&kvm->arch.tdp_mmu_pages); in tdp_unaccount_mmu_page()
312 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_mmu_unlink_sp() argument
314 tdp_unaccount_mmu_page(kvm, sp); in tdp_mmu_unlink_sp()
319 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in tdp_mmu_unlink_sp()
321 untrack_possible_nx_huge_page(kvm, sp); in tdp_mmu_unlink_sp()
322 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in tdp_mmu_unlink_sp()
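
Lines 294-322 cover page accounting and the unlink path: unlinking a shadow page decrements tdp_mmu_pages and, under tdp_mmu_pages_lock, drops it from NX huge-page tracking. A sketch; the nx_huge_page_disallowed early return is an assumption, the rest mirrors lines 303 and 319-322.

    static void tdp_mmu_unlink_sp_sketch(struct kvm *kvm, struct kvm_mmu_page *sp)
    {
            /* Mirrors tdp_unaccount_mmu_page() at line 303. */
            atomic64_dec(&kvm->arch.tdp_mmu_pages);

            /* Early return for pages never tracked as possible NX huge pages (assumed). */
            if (!sp->nx_huge_page_disallowed)
                    return;

            spin_lock(&kvm->arch.tdp_mmu_pages_lock);
            untrack_possible_nx_huge_page(kvm, sp);     /* line 321 */
            spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
    }
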
342 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) in handle_removed_pt() argument
351 tdp_mmu_unlink_sp(kvm, sp); in handle_removed_pt()
418 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt()
441 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte() argument
498 if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) && in handle_changed_spte()
499 !is_mmio_spte(kvm, new_spte) && in handle_changed_spte()
512 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1); in handle_changed_spte()
526 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared); in handle_changed_spte()
576 static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm, in tdp_mmu_set_spte_atomic() argument
582 lockdep_assert_held_read(&kvm->mmu_lock); in tdp_mmu_set_spte_atomic()
588 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_set_spte_atomic()
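
tdp_mmu_set_spte_atomic() (lines 576-588) is the lock-free update path used while mmu_lock is held for read: the SPTE is swapped with a compare-exchange so concurrent fault handlers cannot clobber each other, and handle_changed_spte() then runs with shared == true. A sketch, assuming try_cmpxchg64() and the rcu_dereference() of the sptep; the real helper may be structured differently.

    static inline int __must_check tdp_mmu_set_spte_atomic_sketch(struct kvm *kvm,
                                                                  struct tdp_iter *iter,
                                                                  u64 new_spte)
    {
            u64 *sptep = rcu_dereference(iter->sptep);  /* deref shape assumed */

            lockdep_assert_held_read(&kvm->mmu_lock);   /* line 582 */

            /* Lost the race with another updater: let the caller retry or bail. */
            if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
                    return -EBUSY;

            handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
                                new_spte, iter->level, true);   /* shared == true */
            return 0;
    }
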
594 static inline int __must_check tdp_mmu_zap_spte_atomic(struct kvm *kvm, in tdp_mmu_zap_spte_atomic() argument
599 lockdep_assert_held_read(&kvm->mmu_lock); in tdp_mmu_zap_spte_atomic()
613 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level); in tdp_mmu_zap_spte_atomic()
629 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_zap_spte_atomic()
649 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, in tdp_mmu_set_spte() argument
652 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_set_spte()
665 handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); in tdp_mmu_set_spte()
669 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_iter_set_spte() argument
673 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, in tdp_mmu_iter_set_spte()
705 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, in tdp_mmu_iter_cond_resched() argument
715 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in tdp_mmu_iter_cond_resched()
717 kvm_flush_remote_tlbs(kvm); in tdp_mmu_iter_cond_resched()
722 cond_resched_rwlock_read(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
724 cond_resched_rwlock_write(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
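
tdp_mmu_iter_cond_resched() (lines 705-724) is the yield point used by all the long-running walkers: if rescheduling is needed or mmu_lock is contended, it optionally flushes remote TLBs and then yields the rwlock in whichever mode (read/shared or write/exclusive) it is held. A sketch; the iterator restart after yielding is summarized in a comment because it is not in the listing.

    static inline bool __must_check tdp_mmu_iter_cond_resched_sketch(struct kvm *kvm,
                                                                     struct tdp_iter *iter,
                                                                     bool flush, bool shared)
    {
            if (!need_resched() && !rwlock_needbreak(&kvm->mmu_lock))
                    return false;

            /* Flush before dropping the lock if the caller has pending zaps. */
            if (flush)
                    kvm_flush_remote_tlbs(kvm);         /* line 717 */

            if (shared)
                    cond_resched_rwlock_read(&kvm->mmu_lock);   /* line 722 */
            else
                    cond_resched_rwlock_write(&kvm->mmu_lock);  /* line 724 */

            /* The real helper restarts the tdp_iter here before returning. */
            return true;
    }
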
747 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root() argument
757 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in __tdp_mmu_zap_root()
767 tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE); in __tdp_mmu_zap_root()
768 else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE)) in __tdp_mmu_zap_root()
773 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root() argument
789 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in tdp_mmu_zap_root()
811 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K); in tdp_mmu_zap_root()
812 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M); in tdp_mmu_zap_root()
814 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G); in tdp_mmu_zap_root()
815 __tdp_mmu_zap_root(kvm, root, shared, root->role.level); in tdp_mmu_zap_root()
820 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_tdp_mmu_zap_sp() argument
835 tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, in kvm_tdp_mmu_zap_sp()
848 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs() argument
855 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_zap_leafs()
861 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { in tdp_mmu_zap_leafs()
870 tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE); in tdp_mmu_zap_leafs()
894 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush) in kvm_tdp_mmu_zap_leafs() argument
898 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_zap_leafs()
899 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1) in kvm_tdp_mmu_zap_leafs()
900 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush); in kvm_tdp_mmu_zap_leafs()
905 void kvm_tdp_mmu_zap_all(struct kvm *kvm) in kvm_tdp_mmu_zap_all() argument
921 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_zap_all()
922 for_each_tdp_mmu_root_yield_safe(kvm, root) in kvm_tdp_mmu_zap_all()
923 tdp_mmu_zap_root(kvm, root, false); in kvm_tdp_mmu_zap_all()
930 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) in kvm_tdp_mmu_zap_invalidated_roots() argument
934 read_lock(&kvm->mmu_lock); in kvm_tdp_mmu_zap_invalidated_roots()
936 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_zap_invalidated_roots()
941 KVM_BUG_ON(!root->role.invalid, kvm); in kvm_tdp_mmu_zap_invalidated_roots()
952 tdp_mmu_zap_root(kvm, root, true); in kvm_tdp_mmu_zap_invalidated_roots()
959 kvm_tdp_mmu_put_root(kvm, root); in kvm_tdp_mmu_zap_invalidated_roots()
962 read_unlock(&kvm->mmu_lock); in kvm_tdp_mmu_zap_invalidated_roots()
975 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) in kvm_tdp_mmu_invalidate_all_roots() argument
991 refcount_read(&kvm->users_count) && kvm->created_vcpus) in kvm_tdp_mmu_invalidate_all_roots()
992 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_invalidate_all_roots()
999 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { in kvm_tdp_mmu_invalidate_all_roots()
1038 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) in tdp_mmu_map_handle_target_level()
1042 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level); in tdp_mmu_map_handle_target_level()
1053 if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) { in tdp_mmu_map_handle_target_level()
1078 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_link_sp() argument
1085 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); in tdp_mmu_link_sp()
1089 tdp_mmu_iter_set_spte(kvm, iter, spte); in tdp_mmu_link_sp()
1092 tdp_account_mmu_page(kvm, sp); in tdp_mmu_link_sp()
1097 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1107 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_map() local
1149 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1151 r = tdp_mmu_link_sp(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1164 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_map()
1166 track_possible_nx_huge_page(kvm, sp); in kvm_tdp_mmu_map()
1167 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_map()
1186 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, in kvm_tdp_mmu_unmap_gfn_range() argument
1191 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false) in kvm_tdp_mmu_unmap_gfn_range()
1192 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end, in kvm_tdp_mmu_unmap_gfn_range()
1198 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1201 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, in kvm_tdp_mmu_handle_gfn() argument
1213 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { in kvm_tdp_mmu_handle_gfn()
1217 ret |= handler(kvm, &iter, range); in kvm_tdp_mmu_handle_gfn()
1233 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, in age_gfn_range() argument
1267 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_age_gfn_range() argument
1269 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range); in kvm_tdp_mmu_age_gfn_range()
1272 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, in test_age_gfn() argument
1278 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_test_age_gfn() argument
1280 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); in kvm_tdp_mmu_test_age_gfn()
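
Lines 1198-1280 show the handler plumbing: kvm_tdp_mmu_handle_gfn() walks each root in the slot's address space and invokes a tdp_handler_t callback per SPTE, which is how age_gfn_range() and test_age_gfn() are wired up. A purely hypothetical handler skeleton matching that signature, to show where a new GFN-range operation would slot in; the iterator field it reads is an assumption.

    /* Hypothetical handler; not part of the kernel. */
    static bool count_present_sptes(struct kvm *kvm, struct tdp_iter *iter,
                                    struct kvm_gfn_range *range)
    {
            /*
             * Called once per SPTE in [range->start, range->end) on each root;
             * the return values are OR-ed together by kvm_tdp_mmu_handle_gfn()
             * (line 1217) and typically mean "flush/notification needed".
             */
            return is_shadow_present_pte(iter->old_spte);
    }

    /* Wiring it up would mirror kvm_tdp_mmu_age_gfn_range() at line 1269. */
    static bool kvm_tdp_mmu_count_present(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            return kvm_tdp_mmu_handle_gfn(kvm, range, count_present_sptes);
    }
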
1288 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
1301 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in wrprot_gfn_range()
1311 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) in wrprot_gfn_range()
1326 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, in kvm_tdp_mmu_wrprot_slot() argument
1332 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_wrprot_slot()
1334 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) in kvm_tdp_mmu_wrprot_slot()
1335 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
1359 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_split_huge_page() argument
1371 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i); in tdp_mmu_split_huge_page()
1381 ret = tdp_mmu_link_sp(kvm, iter, sp, shared); in tdp_mmu_split_huge_page()
1390 kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); in tdp_mmu_split_huge_page()
1397 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, in tdp_mmu_split_huge_pages_root() argument
1420 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in tdp_mmu_split_huge_pages_root()
1430 read_unlock(&kvm->mmu_lock); in tdp_mmu_split_huge_pages_root()
1432 write_unlock(&kvm->mmu_lock); in tdp_mmu_split_huge_pages_root()
1437 read_lock(&kvm->mmu_lock); in tdp_mmu_split_huge_pages_root()
1439 write_lock(&kvm->mmu_lock); in tdp_mmu_split_huge_pages_root()
1456 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) in tdp_mmu_split_huge_pages_root()
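
Inside tdp_mmu_split_huge_pages_root() (lines 1397-1456), the page-table allocation happens with mmu_lock dropped, which is why lines 1430-1439 unlock and relock in the matching read/write mode. A sketch of that drop-allocate-reacquire pattern; the allocation helper is hypothetical and the restart requirement is stated only as a comment.

    static struct kvm_mmu_page *tdp_mmu_alloc_sp_dropping_lock(struct kvm *kvm, bool shared)
    {
            struct kvm_mmu_page *sp;

            if (shared)
                    read_unlock(&kvm->mmu_lock);        /* line 1430 */
            else
                    write_unlock(&kvm->mmu_lock);       /* line 1432 */

            /* Sleepable allocation is only safe with mmu_lock dropped. */
            sp = tdp_mmu_alloc_sp_for_split_sketch();   /* hypothetical helper */

            if (shared)
                    read_lock(&kvm->mmu_lock);          /* line 1437 */
            else
                    write_lock(&kvm->mmu_lock);         /* line 1439 */

            /* Caller must restart its tdp_iter: the tree may have changed meanwhile. */
            return sp;
    }
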
1479 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_tdp_mmu_try_split_huge_pages() argument
1487 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in kvm_tdp_mmu_try_split_huge_pages()
1488 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) { in kvm_tdp_mmu_try_split_huge_pages()
1489 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); in kvm_tdp_mmu_try_split_huge_pages()
1491 kvm_tdp_mmu_put_root(kvm, root); in kvm_tdp_mmu_try_split_huge_pages()
1507 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
1523 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in clear_dirty_gfn_range()
1532 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit)) in clear_dirty_gfn_range()
1547 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_slot() argument
1553 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_clear_dirty_slot()
1554 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) in kvm_tdp_mmu_clear_dirty_slot()
1555 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
1561 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
1568 lockdep_assert_held_write(&kvm->mmu_lock); in clear_dirty_pt_masked()
1607 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_pt_masked() argument
1614 for_each_valid_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_clear_dirty_pt_masked()
1615 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
1618 static void zap_collapsible_spte_range(struct kvm *kvm, in zap_collapsible_spte_range() argument
1631 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in zap_collapsible_spte_range()
1656 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, in zap_collapsible_spte_range()
1662 if (tdp_mmu_zap_spte_atomic(kvm, &iter)) in zap_collapsible_spte_range()
1673 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_tdp_mmu_zap_collapsible_sptes() argument
1678 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_zap_collapsible_sptes()
1679 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) in kvm_tdp_mmu_zap_collapsible_sptes()
1680 zap_collapsible_spte_range(kvm, root, slot); in kvm_tdp_mmu_zap_collapsible_sptes()
1688 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1710 tdp_mmu_iter_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1724 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, in kvm_tdp_mmu_write_protect_gfn() argument
1731 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_write_protect_gfn()
1732 for_each_valid_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_write_protect_gfn()
1733 spte_set |= write_protect_gfn(kvm, root, gfn, min_level); in kvm_tdp_mmu_write_protect_gfn()