// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
			(int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
			(int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static inline notrace bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}
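
/*
 * The user address space is carved into "slices": on a typical book3s64
 * hash configuration, SLICE_NUM_LOW slices of 1ul << SLICE_LOW_SHIFT
 * (256MB) below SLICE_LOW_TOP (4GB), tracked as bits in a u64, and high
 * slices of 1ul << SLICE_HIGH_SHIFT (1TB) above it, tracked in a bitmap.
 * Each slice is assigned exactly one page size at a time, recorded as a
 * 4-bit index in the context's low/high psize arrays.
 */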

static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
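
/*
 * In slice_range_to_mask(), the low_slices expression sets the contiguous
 * run of bits from GET_LOW_SLICE_INDEX(start) through
 * GET_LOW_SLICE_INDEX(mend) inclusive. E.g. (assuming 256MB low slices) a
 * range spanning low slices 1 and 2 yields (1u << 3) - (1u << 1) = 0b110.
 */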

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/*
	 * Hack: so that each address is controlled by exactly one of the
	 * high or low area bitmaps, the first high area starts at 4GB,
	 * not at 0.
	 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

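/*
 * Build the mask of slices that contain no VMAs at all, up to high_limit.
 */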
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

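/*
 * Check whether the range [start, start + len) lies entirely within
 * slices marked in 'available'.
 */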
static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

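/*
 * on_each_cpu() callback: CPUs currently running this mm refresh their
 * paca copy of the context and flush their SLB so that stale segment
 * mappings are dropped.
 */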
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}

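/*
 * Mark every slice in 'mask' as having page size 'psize'. The psize
 * arrays pack two slices per byte: even slice in the low nibble, odd
 * slice in the high nibble, hence the i >> 1 / (i & 1) * 4 arithmetic
 * below.
 */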
static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}
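
/*
 * The two area finders below use slice_scan_available() to merge runs of
 * adjacent available slices into one [low_limit, high_limit) window and
 * hand that window to vm_unmapped_area(). E.g. (assuming 256MB low
 * slices) scanning forward (end == 1) from an address in low slice 3
 * returns boundary_addr == 4 << SLICE_LOW_SHIFT, the end of that slice.
 */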

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long addr, unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long found, next_end;
	struct vm_unmapped_area_info info = {
		.length = len,
		.align_mask = PAGE_MASK & ((1ul << pshift) - 1),
	};
	/*
	 * Scan up to the allowed maximum address for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long addr, unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long found, prev;
	struct vm_unmapped_area_info info = {
		.flags = VM_UNMAPPED_AREA_TOPDOWN,
		.length = len,
		.align_mask = PAGE_MASK & ((1ul << pshift) - 1),
	};
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. This should be applied
	 * only to requests whose high_limit is above
	 * DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}

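/*
 * Small helpers to combine slice masks. The SLICE_NUM_HIGH checks let
 * configurations without high slices compile the bitmap operations away.
 */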
static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

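/*
 * MMU_PAGE_BASE is the kernel's base page size. Note that
 * slice_get_unmapped_area() below only triggers an SLB flush on all CPUs
 * when converting slices to a psize larger than this.
 */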
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require the slice
		 * mask cache to be recalculated because it should already
		 * be initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to the good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, no conversion is
			 * needed, so we return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

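/*
 * Generic mmap entry points: the radix MMU does not use slices and goes
 * straight to the generic allocator, hash goes through the slice code.
 */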
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags,
				     vm_flags_t vm_flags)
{
	if (radix_enabled())
		return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);

	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags,
					     vm_flags_t vm_flags)
{
	if (radix_enabled())
		return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);

	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
}

unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
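	/*
	 * Two slices are packed per psizes byte: even-numbered slices in
	 * the low nibble, odd-numbered ones in the high nibble.
	 */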
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default. (psize << 4) | psize
	 * replicates the 4-bit psize into both nibbles of each byte,
	 * and each byte holds two slices, hence the >> 1 sizes.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive it from the vma */
	if (radix_enabled())
		return vma_kernel_pagesize(vma);

	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
}

static int file_to_psize(struct file *file)
{
	struct hstate *hstate = hstate_file(file);

	return shift_to_mmu_psize(huge_page_shift(hstate));
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (radix_enabled())
		return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);

	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
}
#endif