Lines matching the identifier gp (full-word matches):

27 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
546 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) in kvmhv_set_nested_ptbl() argument
551 __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE; in kvmhv_set_nested_ptbl()
552 kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); in kvmhv_set_nested_ptbl()
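
The two matches above come from kvmhv_set_nested_ptbl(), where the first doubleword (dw0) of the nested guest's partition-table entry is built by OR-ing the physical address of the shadow page table with its root index size. A minimal user-space sketch of that bit-packing; the address and the RADIX_PGD_INDEX_SIZE value are stand-ins for illustration, not the ISA-defined fields:

#include <stdio.h>
#include <stdint.h>

#define RADIX_PGD_INDEX_SIZE 13ul	/* assumed value, stand-in for the kernel constant */

int main(void)
{
	uint64_t pgtable_pa = 0x1f0000;	/* stand-in for __pa(gp->shadow_pgtable) */
	uint64_t dw0 = pgtable_pa | RADIX_PGD_INDEX_SIZE;

	printf("ptbl dw0 = 0x%llx\n", (unsigned long long)dw0);
	return 0;
}
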
590 struct kvm_nested_guest *gp; in kvmhv_copy_tofrom_guest_nested() local
611 gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false); in kvmhv_copy_tofrom_guest_nested()
612 if (!gp) { in kvmhv_copy_tofrom_guest_nested()
617 mutex_lock(&gp->tlb_lock); in kvmhv_copy_tofrom_guest_nested()
621 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, in kvmhv_copy_tofrom_guest_nested()
641 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, in kvmhv_copy_tofrom_guest_nested()
648 mutex_unlock(&gp->tlb_lock); in kvmhv_copy_tofrom_guest_nested()
649 kvmhv_put_nested(gp); in kvmhv_copy_tofrom_guest_nested()
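
kvmhv_copy_tofrom_guest_nested() shows the lifecycle every caller in this file follows: pin the nested guest with kvmhv_get_nested(), take gp->tlb_lock for the actual work, then unlock and drop the reference with kvmhv_put_nested(). A self-contained user-space model of that ordering; all types and helpers here are stand-ins, not the kernel's, and only the lock/refcount ordering is the point:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct nested_guest {
	int lpid;
	long refcnt;               /* pinned while a caller works on it */
	pthread_mutex_t tlb_lock;  /* serializes shadow-table users */
};

/* Stand-in lookup: returns with refcnt raised, or NULL. */
static struct nested_guest *get_nested(struct nested_guest *gp)
{
	if (!gp)
		return NULL;
	gp->refcnt++;
	return gp;
}

static void put_nested(struct nested_guest *gp)
{
	if (--gp->refcnt == 0)
		free(gp);	/* last reference frees the structure */
}

static int copy_tofrom_guest(struct nested_guest *entry)
{
	struct nested_guest *gp = get_nested(entry);

	if (!gp)
		return -1;	/* no such L2 guest */
	pthread_mutex_lock(&gp->tlb_lock);
	printf("copying under tlb_lock for lpid %d\n", gp->lpid);
	pthread_mutex_unlock(&gp->tlb_lock);
	put_nested(gp);		/* drop the pin taken by get_nested() */
	return 0;
}

int main(void)
{
	struct nested_guest *gp = calloc(1, sizeof(*gp));

	if (!gp)
		return 1;
	gp->lpid = 1;
	gp->refcnt = 1;		/* reference held by the lookup table */
	pthread_mutex_init(&gp->tlb_lock, NULL);
	copy_tofrom_guest(gp);
	put_nested(gp);		/* drop the table's reference; frees gp */
	return 0;
}
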
660 * Caller must hold gp->tlb_lock.
662 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) in kvmhv_update_ptbl_cache() argument
667 struct kvm *kvm = gp->l1_host; in kvmhv_update_ptbl_cache()
670 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); in kvmhv_update_ptbl_cache()
671 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) { in kvmhv_update_ptbl_cache()
678 gp->l1_gr_to_hr = 0; in kvmhv_update_ptbl_cache()
679 gp->process_table = 0; in kvmhv_update_ptbl_cache()
681 gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0); in kvmhv_update_ptbl_cache()
682 gp->process_table = be64_to_cpu(ptbl_entry.patb1); in kvmhv_update_ptbl_cache()
684 kvmhv_set_nested_ptbl(gp); in kvmhv_update_ptbl_cache()
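
The bounds check in kvmhv_update_ptbl_cache() encodes the partition-table geometry: the PRTS field gives a table of 2^(PRTS+12) bytes, each entry is 16 bytes (hence lpid << 4 for the offset), so valid LPIDs are below 2^(PRTS+12-4). A small program working through that arithmetic; the PRTS_MASK value is an assumption for illustration:

#include <stdio.h>

#define PRTS_MASK 0x1ful	/* assumed field extraction, low bits of the PTCR */

static int lpid_in_table(unsigned long ptcr, unsigned long lpid)
{
	unsigned long entries = 1ul << ((ptcr & PRTS_MASK) + 12 - 4);

	return lpid < entries;
}

int main(void)
{
	unsigned long ptcr = 0x4;	/* PRTS = 4 -> 64 KiB table, 4096 entries */

	printf("entry offset for lpid 7: 0x%lx\n", 7ul << 4);
	printf("lpid 4095 in table: %d\n", lpid_in_table(ptcr, 4095));
	printf("lpid 4096 in table: %d\n", lpid_in_table(ptcr, 4096));
	return 0;
}
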
705 static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp) in __add_nested() argument
707 if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid)) in __add_nested()
718 struct kvm_nested_guest *gp; in kvmhv_alloc_nested() local
721 gp = kzalloc(sizeof(*gp), GFP_KERNEL); in kvmhv_alloc_nested()
722 if (!gp) in kvmhv_alloc_nested()
724 gp->l1_host = kvm; in kvmhv_alloc_nested()
725 gp->l1_lpid = lpid; in kvmhv_alloc_nested()
726 mutex_init(&gp->tlb_lock); in kvmhv_alloc_nested()
727 gp->shadow_pgtable = pgd_alloc(kvm->mm); in kvmhv_alloc_nested()
728 if (!gp->shadow_pgtable) in kvmhv_alloc_nested()
733 gp->shadow_lpid = shadow_lpid; in kvmhv_alloc_nested()
734 gp->radix = 1; in kvmhv_alloc_nested()
736 memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); in kvmhv_alloc_nested()
738 return gp; in kvmhv_alloc_nested()
741 pgd_free(kvm->mm, gp->shadow_pgtable); in kvmhv_alloc_nested()
743 kfree(gp); in kvmhv_alloc_nested()
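
kvmhv_alloc_nested() follows the usual allocate-then-unwind shape: kzalloc() the struct, pgd_alloc() the shadow table, and free in reverse order on any later failure. A user-space sketch of the same shape, with calloc()/malloc() standing in for the kernel allocators and an assumed prev_cpu size:

#include <stdlib.h>
#include <string.h>

struct guest {
	int l1_lpid;
	int shadow_lpid;
	void *shadow_pgtable;
	int prev_cpu[8];	/* assumed size; the kernel's is per possible CPU */
};

static struct guest *alloc_guest(int lpid, int shadow_lpid)
{
	struct guest *gp = calloc(1, sizeof(*gp));	/* stand-in for kzalloc() */

	if (!gp)
		return NULL;
	gp->l1_lpid = lpid;
	gp->shadow_pgtable = malloc(4096);	/* stand-in for pgd_alloc() */
	if (!gp->shadow_pgtable)
		goto out_free;
	gp->shadow_lpid = shadow_lpid;
	/* -1 in every byte yields -1 in every int element, as in the source */
	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
	return gp;

out_free:
	free(gp);
	return NULL;
}

int main(void)
{
	struct guest *gp = alloc_guest(1, 2);

	if (gp) {
		free(gp->shadow_pgtable);
		free(gp);
	}
	return 0;
}
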
750 static void kvmhv_release_nested(struct kvm_nested_guest *gp) in kvmhv_release_nested() argument
752 struct kvm *kvm = gp->l1_host; in kvmhv_release_nested()
754 if (gp->shadow_pgtable) { in kvmhv_release_nested()
760 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, in kvmhv_release_nested()
761 gp->shadow_lpid); in kvmhv_release_nested()
762 pgd_free(kvm->mm, gp->shadow_pgtable); in kvmhv_release_nested()
764 kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0); in kvmhv_release_nested()
765 kvmppc_free_lpid(gp->shadow_lpid); in kvmhv_release_nested()
766 kfree(gp); in kvmhv_release_nested()
769 static void kvmhv_remove_nested(struct kvm_nested_guest *gp) in kvmhv_remove_nested() argument
771 struct kvm *kvm = gp->l1_host; in kvmhv_remove_nested()
772 int lpid = gp->l1_lpid; in kvmhv_remove_nested()
776 if (gp == __find_nested(kvm, lpid)) { in kvmhv_remove_nested()
778 --gp->refcnt; in kvmhv_remove_nested()
780 ref = gp->refcnt; in kvmhv_remove_nested()
783 kvmhv_release_nested(gp); in kvmhv_remove_nested()
795 struct kvm_nested_guest *gp; in kvmhv_release_all_nested() local
801 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { in kvmhv_release_all_nested()
803 if (--gp->refcnt == 0) { in kvmhv_release_all_nested()
804 gp->next = freelist; in kvmhv_release_all_nested()
805 freelist = gp; in kvmhv_release_all_nested()
811 while ((gp = freelist) != NULL) { in kvmhv_release_all_nested()
812 freelist = gp->next; in kvmhv_release_all_nested()
813 kvmhv_release_nested(gp); in kvmhv_release_all_nested()
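
kvmhv_release_all_nested() is a two-phase teardown: entries whose refcount drops to zero are chained onto a local freelist while the lock is held, and the heavier release work happens only after the lock is dropped. A user-space model of that pattern, with a pthread mutex standing in for the kernel lock:

#include <pthread.h>
#include <stdlib.h>

struct guest {
	long refcnt;
	struct guest *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_all(struct guest **table, int n)
{
	struct guest *freelist = NULL, *gp;
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < n; i++) {
		gp = table[i];
		if (!gp)
			continue;
		table[i] = NULL;	/* remove from the table's view */
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	pthread_mutex_unlock(&table_lock);

	while ((gp = freelist) != NULL) {	/* free outside the lock */
		freelist = gp->next;
		free(gp);
	}
}

int main(void)
{
	struct guest *table[2] = { calloc(1, sizeof(struct guest)), NULL };

	if (!table[0])
		return 1;
	table[0]->refcnt = 1;	/* only the table's reference remains */
	release_all(table, 2);
	return 0;
}
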
822 /* caller must hold gp->tlb_lock */
823 static void kvmhv_flush_nested(struct kvm_nested_guest *gp) in kvmhv_flush_nested() argument
825 struct kvm *kvm = gp->l1_host; in kvmhv_flush_nested()
828 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); in kvmhv_flush_nested()
830 kvmhv_flush_lpid(gp->shadow_lpid); in kvmhv_flush_nested()
831 kvmhv_update_ptbl_cache(gp); in kvmhv_flush_nested()
832 if (gp->l1_gr_to_hr == 0) in kvmhv_flush_nested()
833 kvmhv_remove_nested(gp); in kvmhv_flush_nested()
839 struct kvm_nested_guest *gp, *newgp; in kvmhv_get_nested() local
845 gp = __find_nested(kvm, l1_lpid); in kvmhv_get_nested()
846 if (gp) in kvmhv_get_nested()
847 ++gp->refcnt; in kvmhv_get_nested()
850 if (gp || !create) in kvmhv_get_nested()
851 return gp; in kvmhv_get_nested()
863 gp = __find_nested(kvm, l1_lpid); in kvmhv_get_nested()
864 if (!gp) { in kvmhv_get_nested()
867 gp = newgp; in kvmhv_get_nested()
870 ++gp->refcnt; in kvmhv_get_nested()
876 return gp; in kvmhv_get_nested()
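
kvmhv_get_nested() is a classic lookup-or-create under a lock: search first; if the entry is absent and create is set, drop the lock to allocate, then retake it and re-check, because another CPU may have installed the entry in the meantime. Together with kvmhv_put_nested(), which releases the structure on the final reference drop, this is the refcounting used throughout the file. A runnable user-space model; the mutex, table, and names are stand-ins for the kernel's:

#include <pthread.h>
#include <stdlib.h>

struct guest { long refcnt; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct guest *table[16];

static struct guest *get_guest(int lpid, int create)
{
	struct guest *gp, *newgp;

	pthread_mutex_lock(&table_lock);
	gp = table[lpid];
	if (gp)
		gp->refcnt++;
	pthread_mutex_unlock(&table_lock);
	if (gp || !create)
		return gp;

	newgp = calloc(1, sizeof(*newgp));	/* allocate with no lock held */
	if (!newgp)
		return NULL;

	pthread_mutex_lock(&table_lock);
	gp = table[lpid];
	if (!gp) {			/* we won the race: install ours */
		table[lpid] = newgp;
		gp = newgp;
		gp->refcnt = 1;		/* reference held by the table */
		newgp = NULL;
	}
	gp->refcnt++;			/* reference for the caller */
	pthread_mutex_unlock(&table_lock);

	free(newgp);			/* lost the race: discard ours */
	return gp;
}

int main(void)
{
	return get_guest(3, 1) ? 0 : 1;
}
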
879 void kvmhv_put_nested(struct kvm_nested_guest *gp) in kvmhv_put_nested() argument
881 struct kvm *kvm = gp->l1_host; in kvmhv_put_nested()
885 ref = --gp->refcnt; in kvmhv_put_nested()
888 kvmhv_release_nested(gp); in kvmhv_put_nested()
894 struct kvm_nested_guest *gp; in find_kvm_nested_guest_pte() local
897 gp = __find_nested(kvm, lpid); in find_kvm_nested_guest_pte()
898 if (!gp) in find_kvm_nested_guest_pte()
903 pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift); in find_kvm_nested_guest_pte()
996 struct kvm_nested_guest *gp; in kvmhv_remove_nest_rmap() local
1003 gp = __find_nested(kvm, lpid); in kvmhv_remove_nest_rmap()
1004 if (!gp) in kvmhv_remove_nest_rmap()
1011 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); in kvmhv_remove_nest_rmap()
1066 struct kvm_nested_guest *gp, in kvmhv_invalidate_shadow_pte() argument
1075 ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift); in kvmhv_invalidate_shadow_pte()
1079 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); in kvmhv_invalidate_shadow_pte()
1128 struct kvm_nested_guest *gp; in kvmhv_emulate_tlbie_tlb_addr() local
1142 gp = kvmhv_get_nested(kvm, lpid, false); in kvmhv_emulate_tlbie_tlb_addr()
1143 if (!gp) /* No such guest -> nothing to do */ in kvmhv_emulate_tlbie_tlb_addr()
1145 mutex_lock(&gp->tlb_lock); in kvmhv_emulate_tlbie_tlb_addr()
1149 kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift); in kvmhv_emulate_tlbie_tlb_addr()
1155 mutex_unlock(&gp->tlb_lock); in kvmhv_emulate_tlbie_tlb_addr()
1156 kvmhv_put_nested(gp); in kvmhv_emulate_tlbie_tlb_addr()
1161 struct kvm_nested_guest *gp, int ric) in kvmhv_emulate_tlbie_lpid() argument
1165 mutex_lock(&gp->tlb_lock); in kvmhv_emulate_tlbie_lpid()
1170 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, in kvmhv_emulate_tlbie_lpid()
1171 gp->shadow_lpid); in kvmhv_emulate_tlbie_lpid()
1172 kvmhv_flush_lpid(gp->shadow_lpid); in kvmhv_emulate_tlbie_lpid()
1183 kvmhv_flush_nested(gp); in kvmhv_emulate_tlbie_lpid()
1188 mutex_unlock(&gp->tlb_lock); in kvmhv_emulate_tlbie_lpid()
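
kvmhv_emulate_tlbie_lpid() dispatches on the tlbie RIC field under gp->tlb_lock: RIC 0 frees the shadow page table and flushes the shadow LPID, RIC 1 (PWC) is a no-op because page-walk caches aren't cached here, and RIC 2 additionally drops the cached partition-table entry via kvmhv_flush_nested(). A sketch of that dispatch, with the actions reduced to prints:

#include <stdio.h>

/* Caller must hold the guest's tlb_lock. */
static void emulate_tlbie_lpid(int ric)
{
	switch (ric) {
	case 0:	/* invalidate TLB: drop shadow table, flush shadow lpid */
		puts("free shadow pgtable + flush shadow lpid");
		break;
	case 1:	/* invalidate PWC: not cached, nothing to do */
		break;
	case 2:	/* invalidate all, including the cached ptbl entry */
		puts("flush nested: free table, flush lpid, re-read ptbl");
		break;
	default:
		break;
	}
}

int main(void)
{
	emulate_tlbie_lpid(2);
	return 0;
}
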
1194 struct kvm_nested_guest *gp; in kvmhv_emulate_tlbie_all_lpid() local
1198 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { in kvmhv_emulate_tlbie_all_lpid()
1200 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); in kvmhv_emulate_tlbie_all_lpid()
1210 struct kvm_nested_guest *gp; in kvmhv_emulate_priv_tlbie() local
1246 gp = kvmhv_get_nested(kvm, lpid, false); in kvmhv_emulate_priv_tlbie()
1247 if (gp) { in kvmhv_emulate_priv_tlbie()
1248 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); in kvmhv_emulate_priv_tlbie()
1249 kvmhv_put_nested(gp); in kvmhv_emulate_priv_tlbie()
1284 struct kvm_nested_guest *gp; in do_tlb_invalidate_nested_all() local
1286 gp = kvmhv_get_nested(kvm, lpid, false); in do_tlb_invalidate_nested_all()
1287 if (gp) { in do_tlb_invalidate_nested_all()
1288 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); in do_tlb_invalidate_nested_all()
1289 kvmhv_put_nested(gp); in do_tlb_invalidate_nested_all()
1389 struct kvm_nested_guest *gp, in kvmhv_translate_addr_nested() argument
1396 ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr, in kvmhv_translate_addr_nested()
1451 struct kvm_nested_guest *gp, in kvmhv_handle_nested_set_rc() argument
1479 n_gpa, gp->l1_lpid); in kvmhv_handle_nested_set_rc()
1514 /* called with gp->tlb_lock held */
1516 struct kvm_nested_guest *gp) in __kvmhv_nested_page_fault() argument
1533 if (!gp->l1_gr_to_hr) { in __kvmhv_nested_page_fault()
1534 kvmhv_update_ptbl_cache(gp); in __kvmhv_nested_page_fault()
1535 if (!gp->l1_gr_to_hr) in __kvmhv_nested_page_fault()
1544 ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte); in __kvmhv_nested_page_fault()
1560 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); in __kvmhv_nested_page_fault()
1672 (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT); in __kvmhv_nested_page_fault()
1674 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, in __kvmhv_nested_page_fault()
1675 mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); in __kvmhv_nested_page_fault()
1683 kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL); in __kvmhv_nested_page_fault()
1689 struct kvm_nested_guest *gp = vcpu->arch.nested; in kvmhv_nested_page_fault() local
1692 mutex_lock(&gp->tlb_lock); in kvmhv_nested_page_fault()
1693 ret = __kvmhv_nested_page_fault(vcpu, gp); in kvmhv_nested_page_fault()
1694 mutex_unlock(&gp->tlb_lock); in kvmhv_nested_page_fault()
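
kvmhv_nested_page_fault() is a thin wrapper that takes gp->tlb_lock around __kvmhv_nested_page_fault(), matching the "called with gp->tlb_lock held" convention noted above; the worker first refreshes the cached partition-table entry if l1_gr_to_hr is zero and fails the fault if it stays zero. A user-space model of that split; the refresh and all types are stand-ins:

#include <pthread.h>

static unsigned long fake_ptbl_entry = 0x1000;	/* stand-in L1 partition table */

struct guest {
	pthread_mutex_t tlb_lock;
	unsigned long l1_gr_to_hr;	/* cached partition-table entry */
};

/* Worker: caller must hold gp->tlb_lock. */
static int __page_fault(struct guest *gp)
{
	if (!gp->l1_gr_to_hr) {
		/* stand-in for kvmhv_update_ptbl_cache() */
		gp->l1_gr_to_hr = fake_ptbl_entry;
		if (!gp->l1_gr_to_hr)
			return -1;	/* still invalid: fail the fault */
	}
	return 0;	/* translate, set R/C, insert shadow PTE, ... */
}

static int page_fault(struct guest *gp)
{
	int ret;

	pthread_mutex_lock(&gp->tlb_lock);
	ret = __page_fault(gp);
	pthread_mutex_unlock(&gp->tlb_lock);
	return ret;
}

int main(void)
{
	struct guest gp = { .tlb_lock = PTHREAD_MUTEX_INITIALIZER };

	return page_fault(&gp);
}
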