Lines Matching refs:slbe
63 static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe) in kvmppc_slb_sid_shift() argument
65 return slbe->tb ? SID_SHIFT_1T : SID_SHIFT; in kvmppc_slb_sid_shift()
68 static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe) in kvmppc_slb_offset_mask() argument
70 return (1ul << kvmppc_slb_sid_shift(slbe)) - 1; in kvmppc_slb_offset_mask()
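The two helpers above encode the segment geometry: slbe->tb selects between 256 MB segments (SID_SHIFT, 28 offset bits) and 1 TB segments (SID_SHIFT_1T, 40 offset bits), and the offset mask keeps just those low bits of the effective address. A minimal user-space sketch of the same arithmetic, with the shift values written out as constants (an illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT     28   /* 256 MB segment */
#define SID_SHIFT_1T  40   /* 1 TB segment   */

/* tb mirrors slbe->tb: non-zero selects a 1T segment */
static int slb_sid_shift(int tb)
{
	return tb ? SID_SHIFT_1T : SID_SHIFT;
}

static uint64_t slb_offset_mask(int tb)
{
	return (1ull << slb_sid_shift(tb)) - 1;
}

int main(void)
{
	printf("256M offset mask: 0x%llx\n",
	       (unsigned long long)slb_offset_mask(0));
	printf("1T   offset mask: 0x%llx\n",
	       (unsigned long long)slb_offset_mask(1));
	return 0;
}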
104 static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) in kvmppc_mmu_book3s_64_get_pagesize() argument
106 return mmu_pagesize(slbe->base_page_size); in kvmppc_mmu_book3s_64_get_pagesize()
109 static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) in kvmppc_mmu_book3s_64_get_page() argument
111 int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); in kvmppc_mmu_book3s_64_get_page()
113 return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p); in kvmppc_mmu_book3s_64_get_page()
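kvmppc_mmu_book3s_64_get_page() reduces an effective address to its page index inside the segment: mask off everything above the segment boundary, then shift by the base page shift (12 for 4K, 16 for 64K, 24 for 16M pages). A hedged sketch with stand-in parameters instead of the kvmppc_slb entry:

#include <stdint.h>

/* sketch: page index within a segment */
static uint64_t page_in_segment(uint64_t eaddr, int sid_shift, int page_shift)
{
	uint64_t offset_mask = (1ull << sid_shift) - 1;

	return (eaddr & offset_mask) >> page_shift;
}

For example, an in-segment offset of 0x6789000 with 4K pages yields page index 0x6789.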
117 struct kvmppc_slb *slbe, gva_t eaddr, in kvmppc_mmu_book3s_64_get_pteg() argument
128 vpn = kvmppc_slb_calc_vpn(slbe, eaddr); in kvmppc_mmu_book3s_64_get_pteg()
129 ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M; in kvmppc_mmu_book3s_64_get_pteg()
130 hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize); in kvmppc_mmu_book3s_64_get_pteg()
141 page, vcpu_book3s->sdr1, pteg, slbe->vsid); in kvmppc_mmu_book3s_64_get_pteg()
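With the page size and segment size in hand, the VPN is hashed and the result indexes the guest hash table described by SDR1; the secondary hash is the bitwise complement of the primary. A rough sketch of how a hash becomes a PTEG address, assuming 128-byte PTEGs and stand-in htab_base/htab_mask values in place of the kernel's SDR1 parsing:

#include <stdint.h>

/* sketch only: htab_base/htab_mask stand in for what is derived from SDR1 */
static uint64_t pteg_addr(uint64_t htab_base, uint64_t htab_mask,
			  uint64_t hash, int second)
{
	if (second)
		hash = ~hash;            /* secondary hash: complement of primary */
	hash &= htab_mask;               /* clamp to the hash table size */

	return htab_base | (hash << 7); /* 8 HPTEs of 16 bytes per PTEG */
}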
155 static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr) in kvmppc_mmu_book3s_64_get_avpn() argument
157 int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); in kvmppc_mmu_book3s_64_get_avpn()
160 avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); in kvmppc_mmu_book3s_64_get_avpn()
161 avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p); in kvmppc_mmu_book3s_64_get_avpn()
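The AVPN used to match HPTEs is that page index with the VSID placed directly above it; shifting the VSID by (segment shift - page shift) lines it up just over the in-segment page bits. A small sketch of the packing (stand-in parameters again; the kernel applies further masking before comparing against the HPTE):

#include <stdint.h>

/* sketch: combine vsid and in-segment page index into an abbreviated VPN */
static uint64_t make_avpn(uint64_t vsid, uint64_t eaddr,
			  int sid_shift, int page_shift)
{
	uint64_t offset_mask = (1ull << sid_shift) - 1;
	uint64_t page = (eaddr & offset_mask) >> page_shift;

	return page | (vsid << (sid_shift - page_shift));
}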
176 static int decode_pagesize(struct kvmppc_slb *slbe, u64 r) in decode_pagesize() argument
178 switch (slbe->base_page_size) { in decode_pagesize()
195 struct kvmppc_slb *slbe; in kvmppc_mmu_book3s_64_xlate() local
226 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_xlate()
227 if (!slbe) in kvmppc_mmu_book3s_64_xlate()
230 avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); in kvmppc_mmu_book3s_64_xlate()
233 if (slbe->tb) in kvmppc_mmu_book3s_64_xlate()
235 if (slbe->large) in kvmppc_mmu_book3s_64_xlate()
242 pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K; in kvmppc_mmu_book3s_64_xlate()
247 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second); in kvmppc_mmu_book3s_64_xlate()
257 if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) in kvmppc_mmu_book3s_64_xlate()
259 else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) in kvmppc_mmu_book3s_64_xlate()
269 if (slbe->large && in kvmppc_mmu_book3s_64_xlate()
271 pgsize = decode_pagesize(slbe, pte1); in kvmppc_mmu_book3s_64_xlate()
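Within kvmppc_mmu_book3s_64_xlate() the Ks/Kp bits select the protection key for the access: Kp applies when the guest MSR has PR set (problem state), Ks otherwise, and the chosen key then feeds the usual PP-bit permission check. A minimal sketch of that selection, treating the MSR and SLB bits as plain booleans:

#include <stdbool.h>

/* pr mirrors MSR_PR; ks/kp mirror slbe->Ks and slbe->Kp */
static bool slb_protection_key(bool pr, bool ks, bool kp)
{
	return pr ? kp : ks;
}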
367 struct kvmppc_slb *slbe; in kvmppc_mmu_book3s_64_slbmte() local
378 slbe = &vcpu->arch.slb[slb_nr]; in kvmppc_mmu_book3s_64_slbmte()
380 slbe->large = (rs & SLB_VSID_L) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
381 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
382 slbe->esid = slbe->tb ? esid_1t : esid; in kvmppc_mmu_book3s_64_slbmte()
383 slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16); in kvmppc_mmu_book3s_64_slbmte()
384 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
385 slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
386 slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
387 slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
388 slbe->class = (rs & SLB_VSID_C) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
390 slbe->base_page_size = MMU_PAGE_4K; in kvmppc_mmu_book3s_64_slbmte()
391 if (slbe->large) { in kvmppc_mmu_book3s_64_slbmte()
395 slbe->base_page_size = MMU_PAGE_16M; in kvmppc_mmu_book3s_64_slbmte()
398 slbe->base_page_size = MMU_PAGE_64K; in kvmppc_mmu_book3s_64_slbmte()
402 slbe->base_page_size = MMU_PAGE_16M; in kvmppc_mmu_book3s_64_slbmte()
405 slbe->orige = rb & (ESID_MASK | SLB_ESID_V); in kvmppc_mmu_book3s_64_slbmte()
406 slbe->origv = rs; in kvmppc_mmu_book3s_64_slbmte()
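kvmppc_mmu_book3s_64_slbmte() unpacks the two slbmte operands: RB carries the ESID, valid bit and entry index, while RS carries the VSID plus the B (segment size), L, Ks, Kp, N and C flags; shifting by (segment shift - 16) gives the architected VSID shift of 12 for 256 MB segments and 24 for 1 TB segments. A compact user-space sketch of the RS decode, with the bit masks spelled out to mirror the architected VSID-word layout (a sketch of the idea, not the kernel structures):

#include <stdint.h>
#include <stdbool.h>

/* RS (VSID word) bits, written out to mirror the architected layout */
#define VSID_B      0xc000000000000000ull
#define VSID_B_1T   0x4000000000000000ull
#define VSID_KS     0x0000000000000800ull
#define VSID_KP     0x0000000000000400ull
#define VSID_N      0x0000000000000200ull
#define VSID_L      0x0000000000000100ull
#define VSID_C      0x0000000000000080ull

struct slb_rs_fields {
	uint64_t vsid;
	bool tb, large, ks, kp, nx, class_bit;
};

/* sketch: decode the RS operand of slbmte into a stand-in entry */
static struct slb_rs_fields decode_rs(uint64_t rs)
{
	struct slb_rs_fields e = {
		.tb        = (rs & VSID_B_1T) != 0,
		.large     = (rs & VSID_L) != 0,
		.ks        = (rs & VSID_KS) != 0,
		.kp        = (rs & VSID_KP) != 0,
		.nx        = (rs & VSID_N) != 0,
		.class_bit = (rs & VSID_C) != 0,
	};

	/* VSID shift: 12 for 256M segments (28 - 16), 24 for 1T (40 - 16) */
	e.vsid = (rs & ~VSID_B) >> ((e.tb ? 40 : 28) - 16);
	return e;
}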
415 struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_slbfee() local
417 if (slbe) { in kvmppc_mmu_book3s_64_slbfee()
418 *ret_slb = slbe->origv; in kvmppc_mmu_book3s_64_slbfee()
427 struct kvmppc_slb *slbe; in kvmppc_mmu_book3s_64_slbmfee() local
432 slbe = &vcpu->arch.slb[slb_nr]; in kvmppc_mmu_book3s_64_slbmfee()
434 return slbe->orige; in kvmppc_mmu_book3s_64_slbmfee()
439 struct kvmppc_slb *slbe; in kvmppc_mmu_book3s_64_slbmfev() local
444 slbe = &vcpu->arch.slb[slb_nr]; in kvmppc_mmu_book3s_64_slbmfev()
446 return slbe->origv; in kvmppc_mmu_book3s_64_slbmfev()
451 struct kvmppc_slb *slbe; in kvmppc_mmu_book3s_64_slbie() local
456 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); in kvmppc_mmu_book3s_64_slbie()
458 if (!slbe) in kvmppc_mmu_book3s_64_slbie()
461 dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); in kvmppc_mmu_book3s_64_slbie()
463 slbe->valid = false; in kvmppc_mmu_book3s_64_slbie()
464 slbe->orige = 0; in kvmppc_mmu_book3s_64_slbie()
465 slbe->origv = 0; in kvmppc_mmu_book3s_64_slbie()
467 seg_size = 1ull << kvmppc_slb_sid_shift(slbe); in kvmppc_mmu_book3s_64_slbie()
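slbie is given only an effective address, so the emulation looks the entry up, clears valid/orige/origv, and then flushes shadow translations for the whole segment; the flush range is 2^28 bytes for a 256 MB segment or 2^40 bytes for a 1 TB one (the tb bit is left intact, so the shift is still valid after the entry is marked invalid). A tiny sketch of that bookkeeping, using a stand-in entry type:

#include <stdint.h>
#include <stdbool.h>

struct slb_entry_state {
	bool valid, tb;
	uint64_t orige, origv;
};

/* sketch: drop the entry, then report the segment size to flush */
static uint64_t slbie_sketch(struct slb_entry_state *e)
{
	e->valid = false;
	e->orige = 0;
	e->origv = 0;

	return 1ull << (e->tb ? 40 : 28);   /* 1 TB or 256 MB */
}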