Searched refs:gfn_end (Results 1 – 3 of 3) sorted by relevance
/linux-6.12.1/arch/powerpc/kvm/e500_mmu_host.c
    406  unsigned long gfn_start, gfn_end;    in kvmppc_e500_shadow_map() local
    410  gfn_end = gfn_start + tsize_pages;   in kvmppc_e500_shadow_map()
    414  if (gfn_end + pfn - gfn > end)       in kvmppc_e500_shadow_map()
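Aside: these e500 hits are the range check kvmppc_e500_shadow_map() performs before installing a power-of-two sized shadow TLB entry. Below is a minimal standalone sketch of that check's shape, not the kernel code itself; the function name mapping_fits(), the local typedefs, and the 'end' bound parameter are assumptions made for illustration.

#include <stdbool.h>

typedef unsigned long gfn_t;  /* guest frame number (sketch-local typedef) */
typedef unsigned long pfn_t;  /* host page frame number (sketch-local typedef) */

/*
 * Hypothetical helper: would a mapping of tsize_pages pages, chosen so that
 * guest frame 'gfn' maps to host frame 'pfn', stay inside a backing range
 * that ends at host frame 'end'?  Mirrors the gfn_start/gfn_end arithmetic
 * in the hits above.
 */
bool mapping_fits(gfn_t gfn, pfn_t pfn, unsigned long tsize_pages, pfn_t end)
{
	gfn_t gfn_start = gfn & ~(tsize_pages - 1); /* align down to the entry size */
	gfn_t gfn_end = gfn_start + tsize_pages;    /* one past the last gfn covered */

	/* Shift the gfn bound by (pfn - gfn) to get a pfn bound, then compare. */
	return gfn_end + pfn - gfn <= end;
}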
/linux-6.12.1/arch/x86/kvm/mmu/mmu.c
    6571  static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)   in kvm_rmap_zap_gfn_range() argument
    6586  kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {   in kvm_rmap_zap_gfn_range()
    6589  end = min(gfn_end, memslot->base_gfn + memslot->npages);   in kvm_rmap_zap_gfn_range()
    6605  void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)   in kvm_zap_gfn_range() argument
    6609  if (WARN_ON_ONCE(gfn_end <= gfn_start))   in kvm_zap_gfn_range()
    6616  kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);   in kvm_zap_gfn_range()
    6618  flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);   in kvm_zap_gfn_range()
    6621  flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);   in kvm_zap_gfn_range()
    6624  kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);   in kvm_zap_gfn_range()
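The mmu.c hits outline the flow of kvm_zap_gfn_range(): reject an empty range, walk every memslot intersecting [gfn_start, gfn_end), clamp the range to each slot, zap the rmap and TDP MMU leaf entries, then flush remote TLBs for the requested span. Below is a minimal userspace sketch of the per-slot clamping step only; struct slot, clamp_to_slot(), and the example values are hypothetical, and just the min/max shape is taken from the hit at line 6589.

#include <stdio.h>

typedef unsigned long long gfn_t; /* sketch-local typedef */

/* Hypothetical stand-in for a memslot: a base gfn plus a page count. */
struct slot {
	gfn_t base_gfn;
	unsigned long npages;
};

/*
 * Clamp a requested zap range [gfn_start, gfn_end) to one slot and return
 * the number of gfns actually covered (0 if the ranges do not intersect).
 */
static gfn_t clamp_to_slot(const struct slot *s, gfn_t gfn_start, gfn_t gfn_end,
			   gfn_t *start, gfn_t *end)
{
	gfn_t slot_end = s->base_gfn + s->npages;

	*start = gfn_start > s->base_gfn ? gfn_start : s->base_gfn; /* max() */
	*end   = gfn_end   < slot_end    ? gfn_end   : slot_end;    /* min() */

	return *end > *start ? *end - *start : 0;
}

int main(void)
{
	struct slot s = { .base_gfn = 0x100, .npages = 0x80 };
	gfn_t start, end;

	/* The request overlaps only the tail of the slot: expect [0x150, 0x180). */
	if (clamp_to_slot(&s, 0x150, 0x200, &start, &end))
		printf("zap [%#llx, %#llx)\n", start, end);
	return 0;
}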
/linux-6.12.1/arch/x86/include/asm/kvm_host.h
    1964  void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
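The kvm_host.h hit is the x86 declaration other KVM code picks up. A hedged caller-side sketch follows, assuming a hypothetical helper example_invalidate_region() in kernel code that already holds a struct kvm pointer; it is not an actual call site, it only shows converting a (base, npages) pair into the half-open range the function expects.

#include <linux/kvm_host.h>	/* pulls in asm/kvm_host.h and the declaration above */

/*
 * Hypothetical caller (not a real kernel call site): drop every shadow
 * mapping covering 'npages' guest pages starting at 'base_gfn' so the next
 * guest access faults and rebuilds them.  The range is half-open, and per
 * the mmu.c hit at line 6609 an empty range (gfn_end <= gfn_start) trips a
 * WARN and is ignored.
 */
static void example_invalidate_region(struct kvm *kvm, gfn_t base_gfn,
				      unsigned long npages)
{
	kvm_zap_gfn_range(kvm, base_gfn, base_gfn + npages);
}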