// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the page size
 * encoded in the PTEs can differ from the base page size.
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].shift - 10 > 0xf);

	/* Encode the size in the PTE */
	flags |= (mmu_psize_defs[mmu_vmemmap_psize].shift - 10) << 8;
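	/*
	 * The 4-bit field at bits 8..11 thus carries log2(page_size) - 10;
	 * its maximum value 0xf corresponds to 2^25 = 32M, which is why
	 * the BUG_ON above rejects larger encodings.
	 */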

	/* Map the area one PTE at a time. Note that we don't increment
	 * phys: every PTE encodes the full large page and must therefore
	 * keep the low bits of the physical address clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));

	return 0;
}
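
/*
 * NB: this is typically reached from the arch's vmemmap_populate()
 * path when the struct-page array for a sparse memory section is
 * instantiated.
 */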

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_remove_mapping(unsigned long start,
			    unsigned long page_size)
{
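	/*
	 * No teardown is performed: the page tables that back the
	 * vmemmap are simply left in place when a section goes away.
	 */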
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

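/*
 * Boot-time allocation of page-table pages: grab a naturally aligned
 * (align == size) block from memblock, kept below MAX_DMA_ADDRESS.
 * Failing this early in boot is unrecoverable, hence the panic().
 */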
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr;

	ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
				     __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
		      __func__, size, size, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/*
 * map_kernel_page is currently only called by __ioremap; it adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int __ref map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
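	/*
	 * Once slab is available, the generic (possibly sleeping)
	 * page-table allocators can be used; earlier in boot, table
	 * pages must come straight from memblock.
	 */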
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		if (p4d_none(*p4dp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			p4d_populate(&init_mm, p4dp, pudp);
		}
		pudp = pud_offset(p4dp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

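	/*
	 * Ensure the new PTE is visible to other CPUs before the
	 * caller can touch the mapping.
	 */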
	smp_wmb();
	return 0;
}
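
/*
 * Usage sketch (illustrative only, not an actual caller in this file):
 * bolting a single kernel page from early MMU setup code might look
 * like:
 *
 *	if (map_kernel_page(ea, pa, PAGE_KERNEL))
 *		pr_warn("failed to map ea=0x%lx\n", ea);
 */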

void __patch_exception(int exc, unsigned long addr)
{
	unsigned int *ibase = &interrupt_base_book3e;

	/*
	 * Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace, which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one.
	 */

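	/* exc is the byte offset of the vector from interrupt_base_book3e;
	 * dividing by 4 turns it into an index in 32-bit instruction
	 * words, and + 1 skips the leading NOP described above.
	 */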
	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}