// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#ifdef CONFIG_HUGETLB_PAGE
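/*
 * Bottom-up search: scan upward from the process mmap base for a free,
 * suitably aligned range of @len bytes.
 */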
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info = {};

	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

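	/*
	 * Require huge page alignment: the mask sets the bits between
	 * PAGE_SHIFT and huge_page_shift(), so only addresses aligned to
	 * the huge page size are considered.
	 */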
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	return vm_unmapped_area(&info);
}

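/*
 * Top-down search: scan downward from the process mmap base for a free,
 * suitably aligned range, with a bottom-up fallback if that fails.
 */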
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info = {};

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

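/*
 * Arch entry point for placing hugetlb mappings (hugetlbfs files and
 * MAP_HUGETLB): reject misaligned or oversized lengths, honor MAP_FIXED
 * and the hint address when possible, then search per the mm layout.
 *
 * Illustrative user-space trigger (a sketch, not part of this file):
 *
 *	mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 */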
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

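	/*
	 * Otherwise try the hint address: round it down to the huge page
	 * boundary and use it directly if the range there is free.
	 */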
	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (!test_bit(MMF_TOPDOWN, &mm->flags))
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
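/*
 * Report which huge page sizes the hardware supports: PMD-sized (2MB)
 * pages always, PUD-sized (1GB) pages only when the CPU advertises
 * X86_FEATURE_GBPAGES.
 */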
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
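/*
 * Register the PUD-sized (1GB) hstate at boot so gigantic pages can be
 * allocated at runtime via the contiguous-range allocator.
 */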
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */