// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

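/*
 * With CONFIG_PGSTE the "vm.allocate_pgste" sysctl is available. When it
 * is set, page tables for all processes are allocated with the additional
 * page status table (PGSTE) entries required to run KVM guests, not only
 * for processes that explicitly require them.
 */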
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

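/*
 * Allocate a region or segment (crst) table: CRST_ALLOC_ORDER pages
 * holding 2048 eight-byte entries. The entries are not initialized here;
 * callers use crst_table_init() with the appropriate empty entry value.
 */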
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

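/*
 * IPI callback used by crst_table_upgrade(): CPUs that are currently
 * running the upgraded mm update their lowcore copy of the user ASCE and
 * reload control register 7 with it, so that no new TLB entries are
 * created from the old top-level table; the local TLB is then flushed.
 */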
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		get_lowcore()->user_asce.val = mm->context.asce;
		local_ctl_load(7, &get_lowcore()->user_asce);
	}
	__tlb_flush_local();
}

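/*
 * Upgrade the page table hierarchy of @mm so that it can map addresses
 * up to @end. If the current limit is _REGION2_SIZE (4TB, region-third
 * table on top), a region-second table is inserted above the current top
 * level; if @end exceeds _REGION1_SIZE, a region-first table is added on
 * top as well. mm->pgd and the ASCE are switched under page_table_lock,
 * then all CPUs are notified via __crst_table_upgrade().
 */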
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that should ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

#ifdef CONFIG_PGSTE

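/*
 * Allocate a page table for a KVM guest mapping: the lower 2KB of the
 * page holds the 256 page table entries, preset to invalid, the upper
 * 2KB holds the corresponding page status table entries (PGSTEs) and is
 * cleared.
 */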
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

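/*
 * Allocate a page table for normal process use. Each page table occupies
 * a full 4KB page: the lower 2KB contains the 256 PTEs (preset to
 * invalid), the upper 2KB is cleared and holds the PGSTEs if the mm uses
 * them.
 */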
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_pte_dtor_free(ptdesc);
}

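/*
 * Final freeing step for tables queued via the mmu_gather batching,
 * called once the TLB has been flushed. Crst tables (pmd, pud, or p4d)
 * are recognized by their CRST_ALLOC_ORDER compound order and are freed
 * directly; 4KB page tables also run the PTE table destructor.
 */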
void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);
	struct page *page = ptdesc_page(ptdesc);

	if (compound_order(page) == CRST_ALLOC_ORDER) {
		/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	}
	pagetable_pte_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
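/*
 * For THP collapse a PTE table cannot be freed immediately: lockless page
 * table walkers may still be inspecting it, so the actual release is
 * deferred until after an RCU grace period.
 */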
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_pte_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
	 * Turn to the generic pte_free_defer() version once gmap is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

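/*
 * BASE_ADDR_END_FUNC() generates the base_<level>_addr_end() helpers, the
 * counterparts of pgd_addr_end() and friends for the base table walkers:
 * they return either the end of the current table entry's range or @end,
 * whichever comes first. Comparing "next - 1" with "end - 1" keeps the
 * result correct when next wraps around to 0 at the top of the address
 * space.
 */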
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

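/*
 * LRA (load real address) translates a virtual address using the current
 * DAT tables and returns the resulting real address. The base tables are
 * filled with these values, i.e. they replicate the existing kernel
 * mapping without using any enhanced DAT features.
 */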
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

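/*
 * Lowest level of the base table walk: store the real address of every
 * 4KB page in the range into the page table. Nothing is allocated at this
 * level, so there is no work to do for the freeing (!alloc) pass.
 */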
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

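/*
 * The walkers below serve a double purpose: called with alloc=1 from
 * base_asce_alloc() they build the table hierarchy, allocating lower
 * level tables on demand; called with alloc=0 from base_asce_free() they
 * walk an existing hierarchy and free the lower level tables again.
 */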
static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

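/*
 * Base page tables are only _PAGE_TABLE_SIZE (2KB) and are therefore
 * allocated from a kmem cache rather than from full pages. The cache is
 * created lazily on the first base_asce_alloc() call, serialized by a
 * local mutex.
 */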
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}