Lines matching refs: kvm_mmu_page (arch/x86/kvm/mmu/tdp_mmu.c)
Each entry reads: <line in file>  <matching source line>  in <enclosing function>()
54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) in tdp_mmu_free_sp()
70 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, in tdp_mmu_free_sp_rcu_callback()
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) in kvm_tdp_mmu_put_root()
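The free path above never hands the kvm_mmu_page to the RCU core directly: tdp_mmu_free_sp_rcu_callback() receives only the embedded rcu_head and recovers the enclosing structure with container_of(), so the page is reclaimed only after concurrent lockless walkers have left their RCU read sections. A minimal userspace sketch of that pattern (the struct layout, field names, and direct callback invocation are illustrative, not the kernel's exact definitions):

#include <stddef.h>
#include <stdlib.h>

struct rcu_head {
	void (*func)(struct rcu_head *head);
};

struct kvm_mmu_page {
	struct rcu_head rcu_head;
	void *spt;			/* the page-table page itself */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Shape of tdp_mmu_free_sp_rcu_callback(): recover the enclosing
 * kvm_mmu_page from the embedded rcu_head, then free both pieces. */
static void free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	free(sp->spt);
	free(sp);
}

int main(void)
{
	struct kvm_mmu_page *sp = calloc(1, sizeof(*sp));

	sp->spt = malloc(4096);
	/* The kernel would queue this with call_rcu(&sp->rcu_head, ...);
	 * here the callback runs immediately. */
	free_sp_rcu_callback(&sp->rcu_head);
	return 0;
}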
104 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root()
105 struct kvm_mmu_page *prev_root, in tdp_mmu_next_root()
108 struct kvm_mmu_page *next_root; in tdp_mmu_next_root()
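tdp_mmu_next_root() is what lets callers iterate all roots while other CPUs may be freeing them: it pins the next usable root before dropping the reference on prev_root, so the walker never holds zero references mid-iteration. A simplified sketch over a plain array (the kernel walks an RCU-protected list and pairs the get with kvm_tdp_mmu_put_root() from line 76 above; every name below is illustrative):

#include <stddef.h>

struct root {
	int refcount;
	int valid;
};

/* Pin the next valid root, then release the previous one; returns
 * NULL once the list is exhausted. */
static struct root *next_root(struct root **roots, int nr,
			      struct root *prev)
{
	struct root *next = NULL;
	int i = 0;

	if (prev) {
		while (i < nr && roots[i] != prev)
			i++;
		i++;				/* resume after prev */
	}
	for (; i < nr; i++) {
		if (roots[i]->valid) {
			roots[i]->refcount++;	/* get before put */
			next = roots[i];
			break;
		}
	}
	if (prev)
		prev->refcount--;		/* safe: next already pinned */
	return next;
}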
187 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu) in tdp_mmu_alloc_sp()
189 struct kvm_mmu_page *sp; in tdp_mmu_alloc_sp()
197 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, in tdp_mmu_init_sp()
212 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp, in tdp_mmu_init_child_sp()
215 struct kvm_mmu_page *parent_sp; in tdp_mmu_init_child_sp()
232 struct kvm_mmu_page *root; in kvm_tdp_mmu_alloc_root()
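Note the split in the cluster above: tdp_mmu_alloc_sp() only obtains the kvm_mmu_page and its backing page-table page, tdp_mmu_init_sp() records where the page hangs in the tree, and tdp_mmu_init_child_sp() derives the child's role from its parent. A rough sketch of that division of labor (fields are simplified stand-ins; the kernel packs gfn/level into a role word and allocates from per-vCPU caches):

#include <stdint.h>
#include <stdlib.h>

typedef uint64_t *tdp_ptep_t;

struct kvm_mmu_page {
	tdp_ptep_t ptep;	/* parent SPTE that points at this table */
	uint64_t gfn;		/* first gfn covered by this table */
	int level;		/* stand-in for the packed role */
	uint64_t *spt;		/* the 512-entry table itself */
};

/* Allocate only; nothing shared is touched, so this can run before
 * the exact insertion point is known. */
static struct kvm_mmu_page *alloc_sp(void)
{
	struct kvm_mmu_page *sp = calloc(1, sizeof(*sp));

	sp->spt = calloc(512, sizeof(uint64_t));
	return sp;
}

/* Initialize once the insertion point (parent slot, gfn, level) is
 * decided. */
static void init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
		    uint64_t gfn, int level)
{
	sp->ptep = sptep;
	sp->gfn = gfn;
	sp->level = level;
}

/* A child table always sits one level below its parent (the kernel
 * recovers the parent from sptep; it is passed explicitly here). */
static void init_child_sp(struct kvm_mmu_page *child,
			  struct kvm_mmu_page *parent,
			  tdp_ptep_t sptep, uint64_t gfn)
{
	init_sp(child, sptep, gfn, parent->level - 1);
}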
294 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_account_mmu_page()
300 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_unaccount_mmu_page()
312 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_mmu_unlink_sp()
344 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); in handle_removed_pt()
747 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root()
773 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root()
820 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_tdp_mmu_zap_sp()
848 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs()
896 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_leafs()
907 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_all()
932 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_invalidated_roots()
977 struct kvm_mmu_page *root; in kvm_tdp_mmu_invalidate_all_roots()
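The zap entries above split teardown into two phases: kvm_tdp_mmu_invalidate_all_roots() only flags every root as invalid, which is cheap, while kvm_tdp_mmu_zap_invalidated_roots() performs the expensive page-table walk later, once the flagged roots can no longer gain new users. A sketch of the shape of that scheme (the structures and the zap hook are illustrative):

#include <stddef.h>

struct root {
	int valid;
	int refcount;
};

/* Phase 1: flip a flag on every root so new walkers skip them;
 * nothing is freed here, keeping the critical section short. */
static void invalidate_all_roots(struct root **roots, int nr)
{
	for (int i = 0; i < nr; i++)
		roots[i]->valid = 0;
}

/* Phase 2: the actual teardown, run later and per root, so the
 * caller can yield between roots instead of stalling everything. */
static void zap_invalidated_roots(struct root **roots, int nr,
				  void (*zap_root)(struct root *))
{
	for (int i = 0; i < nr; i++)
		if (!roots[i]->valid)
			zap_root(roots[i]);
}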
1021 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_map_handle_target_level()
1079 struct kvm_mmu_page *sp, bool shared) in tdp_mmu_link_sp()
1098 struct kvm_mmu_page *sp, bool shared);
1109 struct kvm_mmu_page *sp; in kvm_tdp_mmu_map()
1189 struct kvm_mmu_page *root; in kvm_tdp_mmu_unmap_gfn_range()
1205 struct kvm_mmu_page *root; in kvm_tdp_mmu_handle_gfn()
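kvm_tdp_mmu_map() walks from the root toward the leaf, allocating and linking a child table (tdp_mmu_link_sp(), line 1079) at each non-present non-leaf entry before installing the final SPTE at the target level. A compressed sketch of that walk, with no atomics, retries, or huge-page handling (bit layout and helper names are illustrative):

#include <stdint.h>
#include <stdlib.h>

#define PRESENT	(1ULL << 0)

/* Walk from @root (at @root_level) down to @target_level for @gfn,
 * allocating missing intermediate tables on the way; returns the
 * slot where the leaf SPTE belongs. */
static uint64_t *map_walk(uint64_t *root, int root_level,
			  int target_level, uint64_t gfn)
{
	uint64_t *table = root;

	for (int level = root_level; level > target_level; level--) {
		int idx = (gfn >> (9 * (level - 1))) & 0x1ff;
		uint64_t *child;

		if (table[idx] & PRESENT) {
			child = (uint64_t *)(uintptr_t)(table[idx] & ~0xfffULL);
		} else {
			/* tdp_mmu_link_sp()'s role: publish the new table
			 * (the kernel uses an atomic cmpxchg and updates
			 * the accounting via tdp_account_mmu_page()). */
			child = calloc(512, sizeof(uint64_t));
			table[idx] = (uint64_t)(uintptr_t)child | PRESENT;
		}
		table = child;
	}
	return &table[(gfn >> (9 * (target_level - 1))) & 0x1ff];
}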
1288 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range()
1329 struct kvm_mmu_page *root; in kvm_tdp_mmu_wrprot_slot()
1341 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void) in tdp_mmu_alloc_sp_for_split()
1343 struct kvm_mmu_page *sp; in tdp_mmu_alloc_sp_for_split()
1360 struct kvm_mmu_page *sp, bool shared) in tdp_mmu_split_huge_page()
1398 struct kvm_mmu_page *root, in tdp_mmu_split_huge_pages_root()
1402 struct kvm_mmu_page *sp = NULL; in tdp_mmu_split_huge_pages_root()
1484 struct kvm_mmu_page *root; in kvm_tdp_mmu_try_split_huge_pages()
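Splitting a huge page in the cluster above swaps one large leaf SPTE for a pre-allocated child table whose 512 entries cover the same physical range at the next lower level, preserving the parent's permissions so the guest sees no change. A worked sketch of the child-SPTE arithmetic (the bit layout is simplified, not the kernel's real SPTE format):

#include <stdint.h>

#define ATTR_MASK	0xfffULL	/* low attribute bits, illustrative */
#define HUGE_BIT	(1ULL << 7)	/* "large/leaf" bit, illustrative */

/* Derive 512 child SPTEs from one huge SPTE: each child inherits the
 * parent's attributes and maps a consecutive slice of the range. */
static void split_huge_spte(uint64_t huge_spte, int child_level,
			    uint64_t child_sptes[512])
{
	uint64_t base = huge_spte & ~(ATTR_MASK | HUGE_BIT);
	uint64_t attrs = huge_spte & ATTR_MASK;
	uint64_t pages_per_entry = 1;	/* 4K pages per child entry */

	for (int l = 1; l < child_level; l++)
		pages_per_entry *= 512;

	for (int i = 0; i < 512; i++) {
		child_sptes[i] = (base +
				  (uint64_t)i * pages_per_entry * 4096) | attrs;
		if (child_level > 1)
			child_sptes[i] |= HUGE_BIT;	/* still a large leaf */
	}
}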
1497 static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp) in tdp_mmu_need_write_protect()
1507 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range()
1550 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_slot()
1561 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked()
1612 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_pt_masked()
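clear_dirty_pt_masked() consumes the dirty bitmap 64 gfns at a time: for each set bit in the mask it clears the dirty-tracking bit of the corresponding SPTE, which per tdp_mmu_need_write_protect() (line 1497) is either the hardware dirty bit or, when write protection is used for dirty logging, the writable bit. A sketch of the mask walk (bit positions are illustrative):

#include <stdint.h>

#define SPTE_W	(1ULL << 1)	/* writable bit, illustrative position */
#define SPTE_D	(1ULL << 6)	/* dirty bit, illustrative position */

/* Clear the dirty-tracking bit for every gfn whose bit is set in
 * @mask, where sptes[i] maps base_gfn + i; mirrors the shape of
 * clear_dirty_pt_masked(). */
static void clear_dirty_masked(uint64_t *sptes, uint64_t mask, int wrprot)
{
	while (mask) {
		int i = __builtin_ctzll(mask);	/* lowest set gfn offset */

		if (wrprot)
			sptes[i] &= ~SPTE_W;	/* next write re-faults */
		else
			sptes[i] &= ~SPTE_D;
		mask &= mask - 1;		/* strip that bit */
	}
}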
1619 struct kvm_mmu_page *root, in zap_collapsible_spte_range()
1676 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_collapsible_sptes()
1688 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn()
1728 struct kvm_mmu_page *root; in kvm_tdp_mmu_write_protect_gfn()