Lines matching refs:kvm_mmu_page
Each entry gives the line number of the match, the matching source line, and the enclosing function where one applies; the hits below all come from KVM's x86 MMU code (arch/x86/kvm/mmu/mmu.c).
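Before the listing, for orientation: every hit below manipulates the same structure. A heavily abridged sketch of it follows; the real definition lives in arch/x86/kvm/mmu/mmu_internal.h, and the exact field set varies across kernel versions, so treat this as a guide rather than the authoritative layout.

/*
 * Abridged sketch of struct kvm_mmu_page. Only fields exercised by the
 * references below are shown; consult mmu_internal.h for the full,
 * version-accurate definition.
 */
struct kvm_mmu_page {
	struct list_head link;            /* active/zapped page lists */
	struct hlist_node hash_link;      /* gfn hash bucket linkage */

	bool unsync;                      /* page has unsynced SPTEs */
	bool nx_huge_page_disallowed;     /* NX huge page mitigation state */

	union kvm_mmu_page_role role;     /* level, access, direct, ... */
	gfn_t gfn;                        /* guest frame being shadowed */

	u64 *spt;                         /* the shadow page table itself */

	int root_count;                   /* pins while in use as a root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap of parent SPTEs */

	struct list_head possible_nx_huge_page_link;
	atomic_t write_flooding_count;    /* see detect_write_flooding() */
};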
281 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
286 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in kvm_flush_remote_tlbs_sptep()
370 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in count_spte_clear()
454 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in __get_spte_lockless()
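The hits at 286, 370 and 454 all recover the owning shadow page from a shadow-PTE pointer. A minimal sketch of how sptep_to_sp() does that, assuming the page_private() back-pointer scheme recent kernels use (these helpers actually live in spte.h, not in the file indexed here):

/*
 * The page backing a shadow page table stores a pointer to its
 * kvm_mmu_page in page_private(); sptep_to_sp() converts the SPTE
 * pointer to a physical address and follows that back-pointer.
 */
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}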
692 static bool sp_has_gptes(struct kvm_mmu_page *sp);
694 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
711 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_access()
731 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, in kvm_mmu_page_set_translation()
750 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index, in kvm_mmu_page_set_access()
804 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed()
834 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in track_possible_nx_huge_page()
852 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, in account_nx_huge_page()
861 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed()
877 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in untrack_possible_nx_huge_page()
886 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_nx_huge_page()
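Lines 834-886 are the NX huge page bookkeeping: track_possible_nx_huge_page() puts a shadow page on the per-VM list that kvm_recover_nx_huge_pages() (line 7443) later walks, and untrack_possible_nx_huge_page() removes it. A hedged sketch of the list handling, with field names as in recent kernels and stats/locking details trimmed:

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/* Already queued for possible recovery: nothing to do. */
	if (!list_empty(&sp->possible_nx_huge_page_link))
		return;

	++kvm->stat.nx_lpage_splits;
	list_add_tail(&sp->possible_nx_huge_page_link,
		      &kvm->arch.possible_nx_huge_pages);
}

void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (list_empty(&sp->possible_nx_huge_page_link))
		return;

	--kvm->stat.nx_lpage_splits;
	list_del_init(&sp->possible_nx_huge_page_link);
}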
1086 struct kvm_mmu_page *sp; in rmap_remove()
1192 struct kvm_mmu_page *sp; in drop_large_spte()
1592 struct kvm_mmu_page *sp; in __rmap_add()
1685 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp) in kvm_mmu_check_sptes_at_free()
1711 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_account_mmu_page()
1717 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unaccount_mmu_page()
1723 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp) in kvm_mmu_free_shadow_page()
1740 struct kvm_mmu_page *sp, u64 *parent_pte) in mmu_page_add_parent_pte()
1748 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte()
1754 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in drop_parent_pte()
1762 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) in kvm_mmu_mark_parents_unsync()
1774 struct kvm_mmu_page *sp; in mark_unsync()
1788 struct kvm_mmu_page *sp;
1794 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, in mmu_pages_add()
1810 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) in clear_unsync_child_bit()
1817 static int __mmu_unsync_walk(struct kvm_mmu_page *sp, in __mmu_unsync_walk()
1823 struct kvm_mmu_page *child; in __mmu_unsync_walk()
1858 static int mmu_unsync_walk(struct kvm_mmu_page *sp, in mmu_unsync_walk()
1869 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page()
1877 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1882 static bool sp_has_gptes(struct kvm_mmu_page *sp) in sp_has_gptes()
1903 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in kvm_sync_page_check()
1936 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i) in kvm_sync_spte()
1945 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in __kvm_sync_page()
1973 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page()
1997 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp()
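The obsolescence check at 1997 is what lets kvm_zap_obsolete_pages() (line 6417) invalidate every shadow page by bumping one generation counter rather than zapping each page eagerly. A sketch under that assumption (the real function also exempts TDP MMU pages, which do not use the generation):

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->role.invalid)
		return true;

	/* Created under an older generation => zap lazily on next touch. */
	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}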
2008 struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
2024 struct kvm_mmu_page *sp = pvec->page[n].sp; in mmu_pages_next()
2041 struct kvm_mmu_page *sp; in mmu_pages_first()
2064 struct kvm_mmu_page *sp; in mmu_pages_clear_parents()
2080 struct kvm_mmu_page *parent, bool can_yield) in mmu_sync_children()
2083 struct kvm_mmu_page *sp; in mmu_sync_children()
2121 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) in __clear_sp_write_flooding_count()
2137 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm, in kvm_mmu_find_shadow_page()
2143 struct kvm_mmu_page *sp; in kvm_mmu_find_shadow_page()
2222 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm, in kvm_mmu_alloc_shadow_page()
2228 struct kvm_mmu_page *sp; in kvm_mmu_alloc_shadow_page()
2258 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm, in __kvm_mmu_get_shadow_page()
2265 struct kvm_mmu_page *sp; in __kvm_mmu_get_shadow_page()
2280 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu, in kvm_mmu_get_shadow_page()
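Lines 2137-2280 are the find-or-create path for shadow pages. The shape of __kvm_mmu_get_shadow_page() (line 2258), sketched with the hash lookup assumed and allocation details elided; parameter names are from recent kernels and may differ in yours:

static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
						      struct kvm_vcpu *vcpu,
						      struct shadow_page_caches *caches,
						      gfn_t gfn,
						      union kvm_mmu_page_role role)
{
	struct hlist_head *sp_list;
	struct kvm_mmu_page *sp;
	bool created = false;

	/* Hash on the gfn, then search the bucket for a role match. */
	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];

	sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
	if (!sp) {
		created = true;
		sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
	}

	trace_kvm_mmu_get_page(sp, created);
	return sp;
}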
2296 struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep); in kvm_mmu_child_role()
2339 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu, in kvm_mmu_get_child_sp()
2417 struct kvm_mmu_page *sp, bool flush) in __link_shadow_page()
2451 struct kvm_mmu_page *sp) in link_shadow_page()
2460 struct kvm_mmu_page *child; in validate_direct_spte()
2479 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte()
2483 struct kvm_mmu_page *child; in mmu_page_zap_pte()
2510 struct kvm_mmu_page *sp, in kvm_mmu_page_unlink_children()
2522 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents()
2532 struct kvm_mmu_page *parent, in mmu_zap_unsync_children()
2543 struct kvm_mmu_page *sp; in mmu_zap_unsync_children()
2556 struct kvm_mmu_page *sp, in __kvm_mmu_prepare_zap_page()
2620 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page()
2632 struct kvm_mmu_page *sp, *nsp; in kvm_mmu_commit_zap_page()
2658 struct kvm_mmu_page *sp, *tmp; in kvm_mmu_zap_oldest_mmu_pages()
2748 struct kvm_mmu_page *sp; in __kvm_mmu_unprotect_gfn_and_retry()
2789 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unsync_page()
2807 struct kvm_mmu_page *sp; in mmu_try_to_unsync_pages()
2909 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in mmu_set_spte()
2934 struct kvm_mmu_page *child; in mmu_set_spte()
2975 struct kvm_mmu_page *sp, in direct_pte_prefetch_many()
3003 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch()
3029 struct kvm_mmu_page *sp; in direct_pte_prefetch()
3237 struct kvm_mmu_page *sp; in direct_map()
3473 struct kvm_mmu_page *sp; in fast_page_fault()
3596 struct kvm_mmu_page *sp; in mmu_free_root_page()
3686 struct kvm_mmu_page *sp; in kvm_mmu_free_guest_mode_roots()
3714 struct kvm_mmu_page *sp; in mmu_alloc_root()
4028 struct kvm_mmu_page *sp; in is_unsync_root()
4064 struct kvm_mmu_page *sp; in kvm_mmu_sync_roots()
4555 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in is_page_fault_stale()
4822 struct kvm_mmu_page *sp; in is_root_usable()
4954 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in kvm_mmu_new_pgd()
5802 struct kvm_mmu_page *sp; in is_obsolete_root()
5880 static bool detect_write_flooding(struct kvm_mmu_page *sp) in detect_write_flooding()
5897 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, in detect_write_misaligned()
5918 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) in get_written_sptes()
5953 struct kvm_mmu_page *sp; in kvm_mmu_track_write()
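Lines 5880-5953 implement write-flooding detection: a shadow page that keeps taking emulated guest writes is assumed to no longer hold page tables and gets zapped. Sketch of the heuristic at line 5880, with the threshold as in recent kernels:

static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * 4K pages can be left unsync instead of write-protected, so the
	 * flooding heuristic only applies to higher levels.
	 */
	if (sp->role.level == PG_LEVEL_4K)
		return false;

	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
}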
6204 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep); in __kvm_mmu_invalidate_addr()
6417 struct kvm_mmu_page *sp, *node; in kvm_zap_obsolete_pages()
6709 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep) in shadow_mmu_get_sp_for_split()
6711 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep); in shadow_mmu_get_sp_for_split()
6743 struct kvm_mmu_page *sp; in shadow_mmu_split_huge_page()
6786 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep); in shadow_mmu_try_split_huge_page()
6826 struct kvm_mmu_page *sp; in shadow_mmu_try_split_huge_pages()
6943 struct kvm_mmu_page *sp; in kvm_mmu_zap_collapsible_spte()
7033 struct kvm_mmu_page *sp, *node; in kvm_mmu_zap_all()
7078 struct kvm_mmu_page *sp; in kvm_mmu_zap_memslot_pages_and_flush()
7339 sizeof(struct kvm_mmu_page), in kvm_mmu_vendor_module_init()
7443 struct kvm_mmu_page *sp; in kvm_recover_nx_huge_pages()
7473 struct kvm_mmu_page, in kvm_recover_nx_huge_pages()