/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Kept outside of CONFIG_HIGHMEM to support the x86 32-bit iomap_atomic()
 * cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif
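
/*
 * With CONFIG_KMAP_LOCAL the per-task kmap_ctrl state survives context
 * switches: the scheduler invokes __kmap_local_sched_out() and
 * __kmap_local_sched_in() to tear down and re-establish the outgoing and
 * incoming task's local mappings, so a pointer obtained from
 * kmap_local_page() remains valid for the owning task across preemption.
 * Illustrative sketch only:
 *
 *	addr = kmap_local_page(page);
 *	...			// task may be preempted here
 *	kunmap_local(addr);	// addr is still valid after resume
 */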

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

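/*
 * kmap() can sleep (note the might_sleep() below) and must therefore not
 * be called from atomic context. A minimal usage sketch, illustrative
 * only ('page', 'data' and 'len' are placeholders):
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, data, len);
 *	kunmap(page);
 *
 * New code should generally prefer kmap_local_page(), which is cheaper
 * and does not block.
 */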
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

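/*
 * kmap_local_folio() maps the single page of the folio that contains
 * byte @offset and returns the address of that byte. Illustrative
 * sketch only ('folio' and 'pos' are placeholders):
 *
 *	char *p = kmap_local_folio(folio, pos);
 *	*p = c;
 *	kunmap_local(p);
 *
 * Only one page is mapped, so accesses through the returned pointer
 * must not cross a page boundary.
 */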
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

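/*
 * The kmap_atomic*() family disables pagefaults and, on !PREEMPT_RT,
 * preemption; on PREEMPT_RT it disables migration instead, so the
 * mapped section stays preemptible but the task remains pinned to its
 * CPU. Code between kmap_atomic() and kunmap_atomic() must not sleep.
 * Illustrative sketch only:
 *
 *	void *vaddr = kmap_atomic(page);
 *	clear_page(vaddr);	// no sleeping allowed in this section
 *	kunmap_atomic(vaddr);
 */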
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned long __nr_free_highpages(void);
unsigned long __totalhigh_pages(void);

static inline unsigned long nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return __totalhigh_pages();
}

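/*
 * is_kmap_addr() covers both highmem mapping ranges: the persistent
 * kmap (PKMAP) area used by kmap()/kmap_high() and the fixmap-based
 * window used by the kmap_local and kmap_atomic variants. Fixmap
 * virtual addresses grow downwards, which is why the second range is
 * expressed as FIX_KMAP_END up to FIX_KMAP_BEGIN.
 */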
static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
		(addr >= __fix_to_virt(FIX_KMAP_END) &&
		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned long nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr:       Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
 * either migration or preemption. Users should not count on these side
 * effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)

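/*
 * Since kmap_atomic() is deprecated, new code should use the kmap_local
 * API instead. A minimal conversion sketch, illustrative only:
 *
 *	addr = kmap_atomic(page);	->	addr = kmap_local_page(page);
 *	...					...
 *	kunmap_atomic(addr);		->	kunmap_local(addr);
 *
 * with an explicit pagefault_disable()/preempt_disable() pair added
 * only if the old code actually relied on those implicit side effects.
 */
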
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page.  Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping.  See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)

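/*
 * Because @__addr may carry an offset, code like the following works.
 * Illustrative sketch only ('off' is a placeholder):
 *
 *	char *p = kmap_local_page(page) + off;
 *	...
 *	kunmap_local(p);
 */
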
#endif