Lines Matching +full:llp +full:- +full:based

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Based on earlier code written by:
17 #include <asm/ppc-opcode.h>
27 #include <asm/code-patching.h>
57 * ignores all other bits from 0-27, so just clear them all. in assert_slb_presence()
59 ea &= ~((1UL << SID_SHIFT) - 1); in assert_slb_presence()
77 WRITE_ONCE(p->save_area[index].esid, 0); in slb_shadow_update()
78 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
79 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
84 WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index)); in slb_shadow_clear()
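The slb_shadow_update() hits above (lines 77-79) show the required update ordering for a shadow save-area slot: clear the ESID first, store the new VSID, and only then store the valid ESID, so a concurrent reader of the shadow area can never pair a valid ESID with a stale VSID. Below is a minimal user-space sketch of that ordering; the struct layout, the mk_*_data() encoders and the constants are simplified assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one SLB shadow save-area slot (layout is an assumption). */
struct shadow_slot {
	volatile uint64_t esid;
	volatile uint64_t vsid;
};

/* Hypothetical encoders: the real mk_esid_data()/mk_vsid_data() also pack
 * segment size, protection and LLP bits. */
static uint64_t mk_esid_data(uint64_t ea, int index)
{
	return (ea & ~((1ULL << 28) - 1)) | (1ULL << 27) /* valid bit */ | index;
}

static uint64_t mk_vsid_data(uint64_t ea, uint64_t flags)
{
	return ((ea >> 28) << 12) | flags;
}

static void shadow_update(struct shadow_slot *s, uint64_t ea, uint64_t flags, int index)
{
	s->esid = 0;                        /* 1: invalidate the slot */
	s->vsid = mk_vsid_data(ea, flags);  /* 2: publish the new VSID */
	s->esid = mk_esid_data(ea, index);  /* 3: re-validate with the new ESID */
}

int main(void)
{
	struct shadow_slot slot = { 0, 0 };

	shadow_update(&slot, 0xc000000000000000ULL, 0x490, 2);
	printf("esid=%llx vsid=%llx\n",
	       (unsigned long long)slot.esid, (unsigned long long)slot.vsid);
	return 0;
}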
117 : "r" (be64_to_cpu(p->save_area[index].vsid)), in __slb_restore_bolted_realmode()
118 "r" (be64_to_cpu(p->save_area[index].esid))); in __slb_restore_bolted_realmode()
121 assert_slb_presence(true, local_paca->kstack); in __slb_restore_bolted_realmode()
130 get_paca()->slb_cache_ptr = 0; in slb_restore_bolted_realmode()
132 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in slb_restore_bolted_realmode()
133 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in slb_restore_bolted_realmode()
155 * Older processors will ignore this optimisation. Over-invalidation in __slb_flush_and_restore_bolted()
163 ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid); in __slb_flush_and_restore_bolted()
164 ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid); in __slb_flush_and_restore_bolted()
175 * This flushes non-bolted entries, it can be run in virtual mode. Must
194 assert_slb_presence(true, get_paca()->kstack); in slb_flush_and_restore_bolted()
196 get_paca()->slb_cache_ptr = 0; in slb_flush_and_restore_bolted()
198 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in slb_flush_and_restore_bolted()
199 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in slb_flush_and_restore_bolted()
208 get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr; in slb_save_contents()
216 slb_ptr->esid = e; in slb_save_contents()
217 slb_ptr->vsid = v; in slb_save_contents()
226 unsigned long llp; in slb_dump_contents() local
234 e = slb_ptr->esid; in slb_dump_contents()
235 v = slb_ptr->vsid; in slb_dump_contents()
247 llp = v & SLB_VSID_LLP; in slb_dump_contents()
249 pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n", in slb_dump_contents()
251 (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp); in slb_dump_contents()
253 pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n", in slb_dump_contents()
255 (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp); in slb_dump_contents()
261 pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr); in slb_dump_contents()
264 pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr); in slb_dump_contents()
266 n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES); in slb_dump_contents()
268 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
271 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
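slb_dump_contents() decodes each saved VSID word by segment size: the B field in the top bits distinguishes 256M from 1T segments, the VSID proper is the remaining bits shifted down by SLB_VSID_SHIFT or SLB_VSID_SHIFT_1T, and the low LLP bits carry the page-size encoding this search matched on. The sketch below mirrors that decode in user space; the constant values are assumptions based on the usual SLB layout, and the authoritative definitions live in asm/book3s/64/mmu-hash.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed, illustrative values for the SLB VSID fields. */
#define SLB_VSID_B        0xc000000000000000ULL  /* segment-size field */
#define SLB_VSID_B_1T     0x4000000000000000ULL  /* B = 01 -> 1T segment */
#define SLB_VSID_SHIFT    12                     /* VSID position, 256M */
#define SLB_VSID_SHIFT_1T 24                     /* VSID position, 1T */
#define SLB_VSID_LLP      0x130ULL               /* L + LP page-size bits */

static void decode_slb_vsid(uint64_t v)
{
	uint64_t llp = v & SLB_VSID_LLP;

	if ((v & SLB_VSID_B) == SLB_VSID_B_1T)
		printf(" 1T   VSID=%13llx LLP:%3llx\n",
		       (unsigned long long)((v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T),
		       (unsigned long long)llp);
	else
		printf(" 256M VSID=%13llx LLP:%3llx\n",
		       (unsigned long long)((v & ~SLB_VSID_B) >> SLB_VSID_SHIFT),
		       (unsigned long long)llp);
}

int main(void)
{
	decode_slb_vsid(0x4000000001234590ULL);  /* made-up 1T entry */
	decode_slb_vsid(0x0000000009876510ULL);  /* made-up 256M entry */
	return 0;
}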
278 * vmalloc is not bolted, so just have to flush non-bolted. in slb_vmalloc_update()
287 for (i = 0; i < ti->slb_preload_nr; i++) { in preload_hit()
290 idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; in preload_hit()
291 if (esid == ti->slb_preload_esid[idx]) in preload_hit()
313 idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR; in preload_add()
314 ti->slb_preload_esid[idx] = esid; in preload_add()
315 if (ti->slb_preload_nr == SLB_PRELOAD_NR) in preload_add()
316 ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; in preload_add()
318 ti->slb_preload_nr++; in preload_add()
325 if (!ti->slb_preload_nr) in preload_age()
327 ti->slb_preload_nr--; in preload_age()
328 ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; in preload_age()
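preload_hit(), preload_add() and preload_age() (lines 287-328) implement a small FIFO of recently used ESIDs in thread_info: add appends at (tail + nr) modulo the ring size, overwriting the oldest slot and advancing the tail once the ring is full, while age drops one entry from the tail. A user-space model of that ring follows; the ring size and the field types are assumptions, only the arithmetic is taken from the lines above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLB_PRELOAD_NR 16   /* assumed ring size */
#define SID_SHIFT      28   /* 256M segment shift */

/* Simplified stand-in for the thread_info preload fields. */
struct preload_ring {
	unsigned char nr;
	unsigned char tail;
	uint32_t esid[SLB_PRELOAD_NR];
};

static bool preload_hit(struct preload_ring *r, uint32_t esid)
{
	for (unsigned char i = 0; i < r->nr; i++) {
		unsigned char idx = (r->tail + i) % SLB_PRELOAD_NR;

		if (r->esid[idx] == esid)
			return true;
	}
	return false;
}

static bool preload_add(struct preload_ring *r, uint64_t ea)
{
	uint32_t esid = ea >> SID_SHIFT;
	unsigned char idx;

	if (preload_hit(r, esid))
		return false;

	idx = (r->tail + r->nr) % SLB_PRELOAD_NR;
	r->esid[idx] = esid;
	if (r->nr == SLB_PRELOAD_NR)
		r->tail = (r->tail + 1) % SLB_PRELOAD_NR;  /* full: drop the oldest */
	else
		r->nr++;
	return true;
}

static void preload_age(struct preload_ring *r)
{
	if (!r->nr)
		return;
	r->nr--;
	r->tail = (r->tail + 1) % SLB_PRELOAD_NR;
}

int main(void)
{
	struct preload_ring r = { 0 };

	preload_add(&r, 0x0000000010000000ULL);  /* some user EA */
	preload_add(&r, 0x00007fff00000000ULL);  /* stack-ish EA */
	preload_age(&r);
	printf("entries=%u tail=%u\n", (unsigned)r.nr, (unsigned)r.tail);
	return 0;
}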
334 struct mm_struct *mm = current->mm; in slb_setup_new_exec()
343 if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR) in slb_setup_new_exec()
370 if (!is_kernel_addr(mm->mmap_base)) { in slb_setup_new_exec()
371 if (preload_add(ti, mm->mmap_base)) in slb_setup_new_exec()
372 slb_allocate_user(mm, mm->mmap_base); in slb_setup_new_exec()
384 struct mm_struct *mm = current->mm; in preload_new_slb_context()
385 unsigned long heap = mm->start_brk; in preload_new_slb_context()
390 if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR) in preload_new_slb_context()
421 unsigned long slbie_data = get_paca()->slb_cache[index]; in slb_cache_slbie_kernel()
422 unsigned long ksp = get_paca()->kstack; in slb_cache_slbie_kernel()
435 unsigned long slbie_data = get_paca()->slb_cache[index]; in slb_cache_slbie_user()
451 * We need interrupts hard-disabled here, not just soft-disabled, in switch_slb()
461 get_paca()->slb_cache_ptr = 0; in switch_slb()
462 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in switch_slb()
474 unsigned long offset = get_paca()->slb_cache_ptr; in switch_slb()
496 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in switch_slb()
499 get_paca()->slb_cache_ptr = 0; in switch_slb()
501 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in switch_slb()
511 tsk->thread.load_slb++; in switch_slb()
512 if (!tsk->thread.load_slb) { in switch_slb()
519 for (i = 0; i < ti->slb_preload_nr; i++) { in switch_slb()
523 idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; in switch_slb()
524 ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; in switch_slb()
551 /* Prepare our SLB miss handler based on our page size */ in slb_initialize()
555 get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp; in slb_initialize()
561 pr_devel("SLB: linear LLP = %04lx\n", linear_llp); in slb_initialize()
562 pr_devel("SLB: io LLP = %04lx\n", io_llp); in slb_initialize()
564 pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); in slb_initialize()
568 get_paca()->stab_rr = SLB_NUM_BOLTED - 1; in slb_initialize()
569 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in slb_initialize()
570 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in slb_initialize()
583 * get_paca()->kstack hasn't been initialized yet. in slb_initialize()
588 (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) in slb_initialize()
589 create_shadowed_slbe(get_paca()->kstack, in slb_initialize()
608 slb_cache_index = local_paca->slb_cache_ptr; in slb_cache_update()
614 local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; in slb_cache_update()
615 local_paca->slb_cache_ptr++; in slb_cache_update()
622 local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; in slb_cache_update()
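slb_cache_update() (lines 608-622) keeps a short per-CPU list of ESIDs that were given SLB entries; while there is room it appends the faulting ESID, and when the list would overflow it pushes slb_cache_ptr past SLB_CACHE_ENTRIES so that switch_slb() treats the cache as invalid and flushes the whole SLB instead of issuing targeted slbie instructions. A minimal model of that convention, with the cache size assumed:

#include <stdint.h>
#include <stdio.h>

#define SLB_CACHE_ENTRIES 8   /* assumed size */
#define SID_SHIFT         28

/* Simplified per-CPU bookkeeping (stands in for the PACA fields). */
static uint32_t slb_cache[SLB_CACHE_ENTRIES];
static uint32_t slb_cache_ptr;

static void slb_cache_update(uint64_t esid_data)
{
	if (slb_cache_ptr < SLB_CACHE_ENTRIES) {
		/* Room left: remember this ESID so a later context switch
		 * can invalidate just the cached entries. */
		slb_cache[slb_cache_ptr++] = esid_data >> SID_SHIFT;
	} else {
		/* Overflow: mark the cache invalid; the switch path must
		 * fall back to flushing the entire SLB. */
		slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

int main(void)
{
	for (int i = 0; i < SLB_CACHE_ENTRIES + 1; i++)
		slb_cache_update((uint64_t)(i + 1) << SID_SHIFT);
	printf("slb_cache_ptr=%u (%s)\n", slb_cache_ptr,
	       slb_cache_ptr > SLB_CACHE_ENTRIES ? "overflowed" : "valid");
	return 0;
}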
646 if (local_paca->slb_used_bitmap != U32_MAX) { in alloc_slb_index()
647 index = ffz(local_paca->slb_used_bitmap); in alloc_slb_index()
648 local_paca->slb_used_bitmap |= 1U << index; in alloc_slb_index()
650 local_paca->slb_kern_bitmap |= 1U << index; in alloc_slb_index()
652 /* round-robin replacement of slb starting at SLB_NUM_BOLTED. */ in alloc_slb_index()
653 index = local_paca->stab_rr; in alloc_slb_index()
654 if (index < (mmu_slb_size - 1)) in alloc_slb_index()
658 local_paca->stab_rr = index; in alloc_slb_index()
661 local_paca->slb_kern_bitmap |= 1U << index; in alloc_slb_index()
663 local_paca->slb_kern_bitmap &= ~(1U << index); in alloc_slb_index()
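alloc_slb_index() (lines 646-663) picks a slot in two phases: while slb_used_bitmap still has a zero bit, ffz() hands out a never-used slot; once every tracked slot has been used it falls back to round-robin replacement via stab_rr, wrapping back to SLB_NUM_BOLTED so the bolted entries are never evicted, and slb_kern_bitmap records which slots currently hold kernel translations. A user-space model of that policy follows; SLB_NUM_BOLTED, the SLB size and the ffz() helper are assumptions (the bitmap itself is 32 bits, which is why the fast path checks against U32_MAX).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLB_NUM_BOLTED 2    /* assumed; build-dependent in the kernel */
static unsigned int mmu_slb_size = 32;

static uint32_t slb_used_bitmap = (1U << SLB_NUM_BOLTED) - 1;
static uint32_t slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
static unsigned int stab_rr = SLB_NUM_BOLTED - 1;

/* Find-first-zero-bit helper; caller guarantees a zero bit exists. */
static unsigned int ffz32(uint32_t x)
{
	return __builtin_ctz(~x);
}

static unsigned int alloc_slb_index(bool kernel)
{
	unsigned int index;

	if (slb_used_bitmap != UINT32_MAX) {
		/* Fast path: a never-used slot is still available. */
		index = ffz32(slb_used_bitmap);
		slb_used_bitmap |= 1U << index;
		if (kernel)
			slb_kern_bitmap |= 1U << index;
	} else {
		/* All slots used at least once: round-robin replacement,
		 * skipping the bolted entries at the bottom. */
		index = stab_rr;
		if (index < mmu_slb_size - 1)
			index++;
		else
			index = SLB_NUM_BOLTED;
		stab_rr = index;
		if (kernel)
			slb_kern_bitmap |= 1U << index;
		else
			slb_kern_bitmap &= ~(1U << index);
	}
	return index;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("allocated index %u\n", alloc_slb_index(i & 1));
	return 0;
}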
680 return -EFAULT; in slb_insert_entry()
706 int slb_cache_index = local_paca->slb_cache_ptr; in slb_insert_entry()
710 * cache of inserted (non-bolted) kernel SLB entries. All in slb_insert_entry()
711 * non-bolted kernel entries are flushed on any user fault, in slb_insert_entry()
712 * or if there are already 3 non-bolted kernel entries. in slb_insert_entry()
724 local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; in slb_insert_entry()
725 local_paca->slb_cache_ptr = slb_cache_index; in slb_insert_entry()
747 return -EFAULT; in slb_allocate_kernel()
755 return -EFAULT; in slb_allocate_kernel()
762 return -EFAULT; in slb_allocate_kernel()
764 flags = local_paca->vmalloc_sllp; in slb_allocate_kernel()
769 return -EFAULT; in slb_allocate_kernel()
774 return -EFAULT; in slb_allocate_kernel()
797 if (ea >= mm_ctx_slb_addr_limit(&mm->context)) in slb_allocate_user()
798 return -EFAULT; in slb_allocate_user()
800 context = get_user_context(&mm->context, ea); in slb_allocate_user()
802 return -EFAULT; in slb_allocate_user()
806 return -EFAULT; in slb_allocate_user()
819 unsigned long ea = regs->dar; in DEFINE_INTERRUPT_HANDLER_RAW()
826 return -EINVAL; in DEFINE_INTERRUPT_HANDLER_RAW()
830 * not bolted. E.g., PACA and global variables are okay, mm->context in DEFINE_INTERRUPT_HANDLER_RAW()
841 * would make them first-class kernel code and able to be traced and in DEFINE_INTERRUPT_HANDLER_RAW()
849 BUG_ON(local_paca->in_kernel_slb_handler); in DEFINE_INTERRUPT_HANDLER_RAW()
850 local_paca->in_kernel_slb_handler = 1; in DEFINE_INTERRUPT_HANDLER_RAW()
854 local_paca->in_kernel_slb_handler = 0; in DEFINE_INTERRUPT_HANDLER_RAW()
858 struct mm_struct *mm = current->mm; in DEFINE_INTERRUPT_HANDLER_RAW()
862 return -EFAULT; in DEFINE_INTERRUPT_HANDLER_RAW()