Lines Matching +refs:get +refs:addr +refs:attrs

79 	.get = get_nx_huge_pages,
84 	.get = param_get_uint,
156 u64 addr; member
2354 u64 addr) in shadow_walk_init_using_root() argument
2356 iterator->addr = addr; in shadow_walk_init_using_root()
2373 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2382 struct kvm_vcpu *vcpu, u64 addr) in shadow_walk_init() argument
2385 addr); in shadow_walk_init()
2393 iterator->index = SPTE_INDEX(iterator->addr, iterator->level); in shadow_walk_okay()
3244 for_each_shadow_entry(vcpu, fault->addr, it) { in direct_map()
3312 gva_t gva = fault->is_tdp ? 0 : fault->addr; in kvm_handle_noslot_fault()
3490 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
4124 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) in mmio_info_in_cache() argument
4134 return vcpu_match_mmio_gpa(vcpu, addr); in mmio_info_in_cache()
4136 return vcpu_match_mmio_gva(vcpu, addr); in mmio_info_in_cache()
4145 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) in get_walk() argument
4151 for (shadow_walk_init(&iterator, vcpu, addr), in get_walk()
4164 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, in get_sptes_lockless() argument
4172 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level); in get_sptes_lockless()
4174 leaf = get_walk(vcpu, addr, sptes, root_level); in get_sptes_lockless()
4181 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in get_mmio_spte() argument
4188 leaf = get_sptes_lockless(vcpu, addr, sptes, &root); in get_mmio_spte()
4212 __func__, addr); in get_mmio_spte()
4222 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) in handle_mmio_page_fault() argument
4227 if (mmio_info_in_cache(vcpu, addr, direct)) in handle_mmio_page_fault()
4230 reserved = get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
4242 addr = 0; in handle_mmio_page_fault()
4244 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault()
4245 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
4275 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) in shadow_page_table_clear_flood() argument
4281 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) in shadow_page_table_clear_flood()
4308 return kvm_setup_async_pf(vcpu, fault->addr, in kvm_arch_setup_async_pf()
4418 trace_kvm_try_async_get_page(fault->addr, fault->gfn); in __kvm_faultin_pfn()
4420 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); in __kvm_faultin_pfn()
6185 u64 addr, hpa_t root_hpa) in __kvm_mmu_invalidate_addr() argument
6189 vcpu_clear_mmio_info(vcpu, addr); in __kvm_mmu_invalidate_addr()
6203 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { in __kvm_mmu_invalidate_addr()
6222 u64 addr, unsigned long roots) in kvm_mmu_invalidate_addr() argument
6231 if (is_noncanonical_address(addr, vcpu)) in kvm_mmu_invalidate_addr()
6234 kvm_x86_call(flush_tlb_gva)(vcpu, addr); in kvm_mmu_invalidate_addr()
6241 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); in kvm_mmu_invalidate_addr()
6245 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); in kvm_mmu_invalidate_addr()
7631 gfn_t gfn, int level, unsigned long attrs) in hugepage_has_attrs() argument
7637 return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs); in hugepage_has_attrs()
7641 attrs != kvm_get_memory_attributes(kvm, gfn)) in hugepage_has_attrs()
7650 unsigned long attrs = range->arg.attributes; in kvm_arch_post_set_memory_attributes() local
7683 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7705 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7741 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn); in kvm_mmu_init_memslot_memory_attributes() local
7743 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_mmu_init_memslot_memory_attributes()
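
The shadow_walk_init_using_root() / shadow_walk_okay() / for_each_shadow_entry() hits above all reduce to picking a 9-bit table index out of `addr` at each paging level, which is what SPTE_INDEX(iterator->addr, iterator->level) does. The standalone sketch below only illustrates that index math under assumed x86-64 4-level paging constants (4 KiB pages, 512 entries per table); it is not the kernel's implementation, and every name in it is made up for the example.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages assumed */
#define PT_ENTRIES	512	/* 512 eight-byte entries per table assumed */

/* Each paging level consumes 9 address bits, starting at bit 12 for level 1,
 * mirroring the idea behind SPTE_LEVEL_SHIFT()/SPTE_INDEX(). */
static unsigned int toy_level_shift(int level)
{
	return PAGE_SHIFT + (level - 1) * 9;
}

static unsigned int toy_spte_index(uint64_t addr, int level)
{
	return (addr >> toy_level_shift(level)) & (PT_ENTRIES - 1);
}

int main(void)
{
	uint64_t addr = 0x00007f1234567000ULL;	/* arbitrary example address */
	int level;

	/* Walk from the root (level 4) down to the 4 KiB leaf (level 1), the
	 * same direction the shadow-walk iterator steps through the tables. */
	for (level = 4; level >= 1; level--)
		printf("level %d -> index %u\n",
		       level, toy_spte_index(addr, level));
	return 0;
}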
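
The mmio_info_in_cache() and vcpu_cache_mmio_info() hits implement a one-entry "last MMIO access" cache, so a repeated fault on the same address can skip the SPTE walk done by get_mmio_spte(). The toy structure below only sketches that caching idea; the real vcpu_match_mmio_gva()/vcpu_match_mmio_gpa() helpers also check the memslot generation and compare page-aligned addresses, which is omitted here, and every name is hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Toy one-entry cache for the most recent MMIO access. */
struct toy_mmio_cache {
	uint64_t gva;		/* guest virtual address that faulted */
	uint64_t gfn;		/* guest frame number it resolved to */
	unsigned int access;	/* access rights recorded for the fault */
	bool valid;
};

static void toy_cache_mmio_info(struct toy_mmio_cache *c, uint64_t gva,
				uint64_t gfn, unsigned int access)
{
	c->gva = gva;
	c->gfn = gfn;
	c->access = access;
	c->valid = true;
}

static bool toy_mmio_info_in_cache(const struct toy_mmio_cache *c,
				   uint64_t gva)
{
	/* Hit: same address as the last cached MMIO fault. */
	return c->valid && c->gva == gva;
}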
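
The hugepage_has_attrs() hits at the end of the listing enforce that a prospective huge mapping may only be used when every smaller page it covers carries the same memory attributes (for example, all private or all shared); kvm_arch_post_set_memory_attributes() and kvm_mmu_init_memslot_memory_attributes() use it to decide which ranges are mixed. The loop below is only a simplified restatement of that uniformity check against a toy per-gfn attribute table; the kernel instead consults kvm_get_memory_attributes()/kvm_range_has_memory_attributes(), and every name here is invented for illustration.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for the per-gfn attribute lookup (the kernel keeps
 * these in an xarray behind kvm_get_memory_attributes()). */
static unsigned long toy_get_attrs(const unsigned long *attr_table, gfn_t gfn)
{
	return attr_table[gfn];
}

/* Simplified version of the hugepage_has_attrs() idea: a huge mapping over
 * [start, start + npages) may only carry `attrs` if every 4 KiB gfn in the
 * range already has exactly those attributes; any mismatch means the range
 * is mixed and must stay mapped with smaller pages. */
static bool toy_hugepage_has_attrs(const unsigned long *attr_table,
				   gfn_t start, uint64_t npages,
				   unsigned long attrs)
{
	gfn_t gfn;

	for (gfn = start; gfn < start + npages; gfn++) {
		if (toy_get_attrs(attr_table, gfn) != attrs)
			return false;
	}
	return true;
}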