// SPDX-License-Identifier: GPL-2.0

/*
 * Transitional page tables for kexec and hibernate
 *
 * This file is derived from: arch/arm64/kernel/hibernate.c
 *
 * Copyright (c) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 *
 */

/*
 * Transitional page tables are used while the system transfers from one
 * world to another, such as during hibernate restore and kexec reboots.
 * During these phases the live page tables cannot be relied upon, because
 * hibernate and kexec may overwrite them mid-transition.
 */

#include <asm/trans_pgd.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <linux/suspend.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/kfence.h>

static void *trans_alloc(struct trans_pgd_info *info)
{
	return info->trans_alloc_page(info->trans_alloc_arg);
}
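
/*
 * Illustrative sketch only, not part of this file: callers supply the page
 * allocator through struct trans_pgd_info. Hibernate, for example, can wrap
 * get_safe_page(); the names below are assumptions for illustration:
 *
 *	static void *hibernate_page_alloc(void *arg)
 *	{
 *		return (void *)get_safe_page((gfp_t)(unsigned long)arg);
 *	}
 *
 *	static struct trans_pgd_info trans_info = {
 *		.trans_alloc_page	= hibernate_page_alloc,
 *		.trans_alloc_arg	= (void *)GFP_ATOMIC,
 *	};
 */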

static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = __ptep_get(src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		__set_pte(dst_ptep, pte_mkwrite_novma(pte));
	} else if (!pte_none(pte)) {
		/*
		 * debug_pagealloc will have removed the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Other cases include kfence / vmalloc / memfd_secret which
		 * may call `set_direct_map_invalid_noflush()`.
		 *
		 * Before marking this entry valid, check that the pfn should
		 * be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		__set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
	}
}

static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
		    pmd_t *src_pmdp, unsigned long start, unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = trans_alloc(info);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
		    pud_t *src_pudp, unsigned long start, unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = trans_alloc(info);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(NULL, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(info, dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}

static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp,
		    p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = trans_alloc(info);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(NULL, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(info, dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}

static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
		    pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(info, dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}

static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp,
			    unsigned long start, unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}

/*
 * Create trans_pgd and copy the linear map into it.
 * info:	contains the allocator and its argument
 * dst_pgdp:	new page table that is created, and to which the map is copied.
 * start:	Start of the interval (inclusive).
 * end:		End of the interval (exclusive).
 *
 * Returns 0 on success, and -ENOMEM on failure.
 */
int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
			  unsigned long start, unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = trans_alloc(info);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(info, trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}
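
/*
 * Illustrative sketch only: on a hibernate-resume style path, a caller could
 * copy the linear map into a throwaway table along these lines (trans_info is
 * the hypothetical allocator context sketched near the top of this file):
 *
 *	pgd_t *tmp_pg_dir;
 *	int rc;
 *
 *	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir,
 *				   PAGE_OFFSET, PAGE_END);
 *	if (rc)
 *		return rc;
 */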

/*
 * The page we want to idmap may be outside the range covered by VA_BITS that
 * can be built using the kernel's p?d_populate() helpers. As a one-off, for a
 * single page, we build these page tables bottom up and just assume it will
 * need the maximum T0SZ.
 *
 * Returns 0 on success, and -ENOMEM on failure.
 * On success, trans_ttbr0 contains the page table with the idmapped page, and
 * t0sz is set to the maximum T0SZ for this page.
 */
int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
			 unsigned long *t0sz, void *page)
{
	phys_addr_t dst_addr = virt_to_phys(page);
	unsigned long pfn = __phys_to_pfn(dst_addr);
	int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
	int bits_mapped = PAGE_SHIFT - 4;
	unsigned long level_mask, prev_level_entry, *levels[4];
	int this_level, index, level_lsb, level_msb;

	dst_addr &= PAGE_MASK;
	prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));

	for (this_level = 3; this_level >= 0; this_level--) {
		levels[this_level] = trans_alloc(info);
		if (!levels[this_level])
			return -ENOMEM;

		level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
		level_msb = min(level_lsb + bits_mapped, max_msb);
		level_mask = GENMASK_ULL(level_msb, level_lsb);

		index = (dst_addr & level_mask) >> level_lsb;
		*(levels[this_level] + index) = prev_level_entry;

		pfn = virt_to_pfn(levels[this_level]);
		prev_level_entry = pte_val(pfn_pte(pfn,
						   __pgprot(PMD_TYPE_TABLE)));

		if (level_msb == max_msb)
			break;
	}

	*trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
	*t0sz = TCR_T0SZ(max_msb + 1);

	return 0;
}
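
/*
 * Illustrative sketch only: a caller might idmap a page of relocation code
 * and install it in TTBR0 before switching the active tables, roughly:
 *
 *	phys_addr_t trans_ttbr0;
 *	unsigned long t0sz;
 *	int rc;
 *
 *	rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
 *	if (rc)
 *		return rc;
 *	cpu_install_ttbr0(trans_ttbr0, t0sz);
 */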

/*
 * Create a copy of the vector table so we can call HVC_SET_VECTORS or
 * HVC_SOFT_RESTART from contexts where the table may be overwritten.
 */
int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
			       phys_addr_t *el2_vectors)
{
	void *hyp_stub = trans_alloc(info);

	if (!hyp_stub)
		return -ENOMEM;
	*el2_vectors = virt_to_phys(hyp_stub);
	memcpy(hyp_stub, &trans_pgd_stub_vectors, ARM64_VECTOR_TABLE_LEN);
	caches_clean_inval_pou((unsigned long)hyp_stub,
			       (unsigned long)hyp_stub +
			       ARM64_VECTOR_TABLE_LEN);
	dcache_clean_inval_poc((unsigned long)hyp_stub,
			       (unsigned long)hyp_stub +
			       ARM64_VECTOR_TABLE_LEN);

	return 0;
}
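
/*
 * Illustrative sketch only: when EL2 must be reset, a caller could stage the
 * stub vectors first and hand their physical address on to the relocation
 * code, for example:
 *
 *	phys_addr_t el2_vectors = 0;
 *
 *	if (el2_reset_needed()) {
 *		int rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
 *
 *		if (rc)
 *			return rc;
 *	}
 */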