Lines matching references to 'hpt' (the struct kvm_hpt_info member holding the guest hashed page table, and its users):
69 struct kvm_hpt_info hpt; member
74 unsigned long hpt = 0; in kvmppc_allocate_hpt() local
85 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); in kvmppc_allocate_hpt()
86 memset((void *)hpt, 0, (1ul << order)); in kvmppc_allocate_hpt()
90 if (!hpt) in kvmppc_allocate_hpt()
91 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL in kvmppc_allocate_hpt()
94 if (!hpt) in kvmppc_allocate_hpt()
106 free_pages(hpt, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
111 info->virt = hpt; in kvmppc_allocate_hpt()
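
The allocation path above prefers a contiguous CMA reservation (kvm_alloc_hpt_cma, then pfn_to_kaddr plus memset) and falls back to __get_free_pages, with free_pages used on the error path; the result is recorded in info->virt. A minimal user-space sketch of that try-pool-then-fallback shape, assuming a hypothetical pool_alloc() in place of the CMA helper and calloc in place of the page allocator (names here are illustrative, not the kernel's):

    #include <stdlib.h>
    #include <string.h>

    struct hpt_info { void *virt; unsigned int order; };

    /* hypothetical stand-in for kvm_alloc_hpt_cma(); pretend the pool is empty */
    static void *pool_alloc(size_t bytes) { (void)bytes; return NULL; }

    static int allocate_hpt(struct hpt_info *info, unsigned int order)
    {
        size_t bytes = 1ul << order;        /* the table is 2^order bytes */
        void *hpt = pool_alloc(bytes);      /* preferred contiguous pool */

        if (hpt)
            memset(hpt, 0, bytes);          /* pool memory is zeroed by hand */
        else
            hpt = calloc(1, bytes);         /* generic fallback, already zeroed */

        if (!hpt)
            return -1;                      /* -ENOMEM in the kernel */

        info->virt  = hpt;
        info->order = order;
        return 0;
    }
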
121 kvm->arch.hpt = *info; in kvmppc_set_hpt()
149 if (kvm->arch.hpt.order == order) { in kvmppc_alloc_reset_hpt()
153 memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
162 if (kvm->arch.hpt.virt) { in kvmppc_alloc_reset_hpt()
163 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_alloc_reset_hpt()
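
kvmppc_alloc_reset_hpt reuses the existing table when the requested order already matches (a memset making every HPTE invalid is enough) and otherwise frees the old table before allocating a fresh one. A rough outline, continuing the sketch above (alloc_reset_hpt is an illustrative name, and free() stands in for kvmppc_free_hpt()):

    static int alloc_reset_hpt(struct hpt_info *hpt, unsigned int order)
    {
        if (hpt->virt && hpt->order == order) {
            memset(hpt->virt, 0, 1ul << order);   /* all HPTEs invalid again */
            return 0;
        }
        if (hpt->virt) {
            free(hpt->virt);                      /* kvmppc_free_hpt() */
            hpt->virt = NULL;
        }
        return allocate_hpt(hpt, order);          /* then installed via kvmppc_set_hpt() */
    }
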
226 if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) in kvmppc_map_vrma()
227 npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; in kvmppc_map_vrma()
238 & kvmppc_hpt_mask(&kvm->arch.hpt); in kvmppc_map_vrma()
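
kvmppc_map_vrma clamps npages and masks the computed hash with kvmppc_hpt_mask so every VRMA entry lands inside the table. Under the usual HPT geometry (16-byte HPTEs grouped eight to a 128-byte HPTEG), the size helpers reduce to shifts of the order; a sketch of that arithmetic, mirroring kvmppc_hpt_npte()/kvmppc_hpt_mask() with local names:

    struct hpt_geom { unsigned long virt; unsigned int order; };

    /* number of HPTEs: 2^order bytes of 2^4-byte entries */
    static unsigned long hpt_npte(const struct hpt_geom *hpt)
    {
        return 1ul << (hpt->order - 4);
    }

    /* mask over HPTE-group indexes: groups are 2^7 = 128 bytes (8 HPTEs) */
    static unsigned long hpt_mask(const struct hpt_geom *hpt)
    {
        return (1ul << (hpt->order - 7)) - 1;
    }
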
375 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
379 gr = kvm->arch.hpt.rev[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
554 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
555 rev = &kvm->arch.hpt.rev[index]; in kvmppc_book3s_hv_page_fault()
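
Both the translation path and the page-fault path above turn an HPTE index into a pointer with virt + (index << 4) (16-byte entries) and look up the matching software state in the parallel rev[] array. A self-contained sketch of that pairing, with simplified types (the real revmap_entry also carries chain link fields):

    #include <stdint.h>

    struct revmap_entry_sketch { unsigned long guest_rpte; /* link fields omitted */ };

    struct hpt_view {
        unsigned long virt;                  /* kernel VA of the HPT */
        struct revmap_entry_sketch *rev;     /* one entry per HPTE */
    };

    static uint64_t *hpte_addr(struct hpt_view *hpt, unsigned long index)
    {
        return (uint64_t *)(hpt->virt + (index << 4));   /* 2^4 bytes per HPTE */
    }

    static struct revmap_entry_sketch *hpte_rev(struct hpt_view *hpt,
                                                unsigned long index)
    {
        return &hpt->rev[index];
    }
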
783 __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvmppc_unmap_hpte()
784 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvmppc_unmap_hpte()
842 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_unmap_rmapp()
902 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_age_rmapp()
922 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_age_rmapp()
973 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_age_rmapp()
990 hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); in kvm_test_age_rmapp()
1024 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_clear_dirty_npages()
1041 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_test_clear_dirty_npages()
1204 rc = kvmppc_allocate_hpt(&resize->hpt, resize->order); in resize_hpt_allocate()
1209 resize->hpt.virt); in resize_hpt_allocate()
1218 struct kvm_hpt_info *old = &kvm->arch.hpt; in resize_hpt_rehash_hpte()
1219 struct kvm_hpt_info *new = &resize->hpt; in resize_hpt_rehash_hpte()
1375 for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { in resize_hpt_rehash()
1397 hpt_tmp = kvm->arch.hpt; in resize_hpt_pivot()
1398 kvmppc_set_hpt(kvm, &resize->hpt); in resize_hpt_pivot()
1399 resize->hpt = hpt_tmp; in resize_hpt_pivot()
1420 if (resize->hpt.virt) in resize_hpt_release()
1421 kvmppc_free_hpt(&resize->hpt); in resize_hpt_release()
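
resize_hpt_pivot swaps the live HPT descriptor with the freshly rehashed one through a temporary, so the old table ends up in resize->hpt and resize_hpt_release later sees a non-zero virt and frees it. In outline (hpt_desc and pivot_hpt are illustrative names):

    struct hpt_desc { unsigned long virt; void *rev; unsigned int order; };

    static void pivot_hpt(struct hpt_desc *live, struct hpt_desc *resized)
    {
        struct hpt_desc tmp = *live;   /* hpt_tmp = kvm->arch.hpt           */
        *live = *resized;              /* kvmppc_set_hpt(kvm, &resize->hpt) */
        *resized = tmp;                /* resize->hpt = hpt_tmp             */
    }
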
1737 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_read()
1738 revp = kvm->arch.hpt.rev + i; in kvm_htab_read()
1753 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1763 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1779 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1800 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { in kvm_htab_read()
1862 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || in kvm_htab_write()
1863 i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) in kvm_htab_write()
1866 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_write()
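
Before touching any entry at i * HPTE_SIZE, kvm_htab_write rejects a chunk whose starting index or valid-plus-invalid run would step past the end of the table. The check amounts to the following (illustrative helper; i, n_valid and n_invalid come from the chunk header, and npte is kvmppc_hpt_npte(&kvm->arch.hpt)):

    #include <stdbool.h>

    static bool htab_chunk_in_range(unsigned long i, unsigned long n_valid,
                                    unsigned long n_invalid, unsigned long npte)
    {
        return i < npte && i + n_valid + n_invalid <= npte;
    }
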
2063 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in debugfs_htab_read()
2064 for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); in debugfs_htab_read()
2075 gr = kvm->arch.hpt.rev[i].guest_rpte; in debugfs_htab_read()