/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:	Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
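
/*
 * Illustrative sketch (not part of the upstream documentation): a typical
 * long-term kmap()/kunmap() pair.  The helper checksum_page() is
 * hypothetical; crc32() is assumed to come from <linux/crc32.h>.
 *
 *   static u32 checksum_page(struct page *page)
 *   {
 *   	// May sleep, so only valid in preemptible task context.
 *   	void *vaddr = kmap(page);
 *   	u32 sum = crc32(0, vaddr, PAGE_SIZE);
 *
 *   	kunmap(page);
 *   	return sum;
 *   }
 */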

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:	The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
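
/*
 * Illustrative sketch (hypothetical helper, not from the upstream
 * documentation): a short-lived local mapping used to read one byte.
 *
 *   static u8 read_page_byte(struct page *page, size_t offset)
 *   {
 *   	u8 *vaddr = kmap_local_page(page);
 *   	u8 val = vaddr[offset];
 *
 *   	// The pointer is only valid in this context; do not hand it on.
 *   	kunmap_local(vaddr);
 *   	return val;
 *   }
 */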

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
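
/*
 * Illustrative sketch (hypothetical helper): note that the address returned
 * by kmap_local_folio() points at @offset itself, not at the start of the
 * containing page, so no extra offset arithmetic is needed.
 *
 *   static u8 folio_read_byte(struct folio *folio, size_t offset)
 *   {
 *   	u8 *vaddr = kmap_local_folio(folio, offset);
 *   	u8 val = *vaddr;
 *
 *   	kunmap_local(vaddr);
 *   	return val;
 *   }
 */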

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache.  The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 * vaddr1 = kmap_atomic(page1);
 * vaddr2 = kmap_atomic(page2);
 *
 * memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 * kunmap_atomic(vaddr2);
 * kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address.  It may be allocated from highmem or
 * the movable zone.  An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
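
/*
 * Illustrative sketch (hypothetical caller): zeroing the unused tail of a
 * partially filled page, e.g. after a short read populated only @valid
 * bytes.
 *
 *   static void zero_page_tail(struct page *page, unsigned int valid)
 *   {
 *   	if (valid < PAGE_SIZE)
 *   		zero_user_segment(page, valid, PAGE_SIZE);
 *   }
 */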

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If the architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with #MC in the source page (@from) handled, and return the number
 * of bytes not copied if there was a #MC, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif
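
/*
 * Illustrative sketch (hypothetical caller): the #MC variants return the
 * number of bytes not copied, so callers must check the result rather than
 * assume the copy succeeded.
 *
 *   static int dup_page_for_cow(struct page *dst, struct page *src,
 *   			       unsigned long addr, struct vm_area_struct *vma)
 *   {
 *   	if (copy_mc_user_highpage(dst, src, addr, vma))
 *   		return -EHWPOISON;	// source page is poisoned
 *   	return 0;
 *   }
 */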

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}
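
/*
 * Illustrative sketch (hypothetical helper): a round trip through a page
 * using the bounded helpers above; each call maps the page only for the
 * duration of the copy and never crosses the page boundary.
 *
 *   static void stash_scrub_restore(struct page *page, void *buf, size_t len)
 *   {
 *   	memcpy_from_page(buf, page, 0, len);	// save the first len bytes
 *   	memzero_page(page, 0, len);		// scrub them
 *   	memcpy_to_page(page, 0, buf, len);	// put them back
 *   }
 */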

/**
 * memcpy_from_folio - Copy a range of bytes from a folio.
 * @to: The memory to copy to.
 * @folio: The folio to read from.
 * @offset: The first byte in the folio to read.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_from_folio(char *to, struct folio *folio,
		size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}
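
/*
 * Illustrative sketch (hypothetical caller): unlike memcpy_from_page(),
 * memcpy_from_folio() may cross page boundaries within a large folio, so a
 * whole on-disk record can be read with one call.
 *
 *   static void read_record(void *rec, size_t rec_size,
 *   			    struct folio *folio, size_t offset)
 *   {
 *   	memcpy_from_folio(rec, folio, offset, rec_size);
 *   }
 */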

/**
 * memcpy_to_folio - Copy a range of bytes to a folio.
 * @folio: The folio to write to.
 * @offset: The first byte in the folio to store to.
 * @from: The memory to copy from.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_to_folio(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}
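
/*
 * Illustrative sketch (hypothetical helper): filling a folio with @len bytes
 * of data and zeroing the remainder.  memcpy_to_folio() handles the per-page
 * kmap/kunmap on HIGHMEM and the final dcache flush; folio_zero_range() is
 * defined further down in this header.
 *
 *   static void fill_folio(struct folio *folio, const char *src, size_t len)
 *   {
 *   	memcpy_to_folio(folio, 0, src, len);
 *   	folio_zero_range(folio, len, folio_size(folio) - len);
 *   	folio_mark_uptodate(folio);
 *   }
 */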

/**
 * folio_zero_tail - Zero the tail of a folio.
 * @folio: The folio to zero.
 * @offset: The byte offset in the folio to start zeroing at.
 * @kaddr: The address the folio is currently mapped to.
 *
 * If you have already used kmap_local_folio() to map a folio, written
 * some data to it and now need to zero the end of the folio (and flush
 * the dcache), you can use this function.  If you do not have the
 * folio kmapped (eg the folio has been partially populated by DMA),
 * use folio_zero_range() or folio_zero_segment() instead.
 *
 * Return: An address which can be passed to kunmap_local().
 */
static inline __must_check void *folio_zero_tail(struct folio *folio,
		size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}

	memset(kaddr, 0, len);
	flush_dcache_folio(folio);

	return kaddr;
}

/**
 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
 * @folio: The destination folio.
 * @offset: The offset into @folio at which to start copying.
 * @from: The data to copy.
 * @len: How many bytes of data to copy.
 *
 * This function is most useful for filesystems which support inline data.
 * When they want to copy data from the inode into the page cache, this
 * function does everything for them.  It supports large folios even on
 * HIGHMEM configurations.
 */
static inline void folio_fill_tail(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	char *to = kmap_local_folio(folio, offset);

	VM_BUG_ON(offset + len > folio_size(folio));

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memcpy(to, from, max);
			kunmap_local(to);
			len -= max;
			from += max;
			offset += max;
			max = PAGE_SIZE;
			to = kmap_local_folio(folio, offset);
		}
	}

	memcpy(to, from, len);
	to = folio_zero_tail(folio, offset + len, to + len);
	kunmap_local(to);
}
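
/*
 * Illustrative sketch (hypothetical inline-data read path): the kind of
 * caller folio_fill_tail() is intended for, copying an inode's inline data
 * into the page cache and padding the rest of the folio with zeroes.
 *
 *   static void read_inline_data(struct folio *folio, const char *idata,
 *   			       size_t isize)
 *   {
 *   	folio_fill_tail(folio, 0, idata, isize);
 *   	folio_mark_uptodate(folio);
 *   }
 */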

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio.  This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
		loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_highmem(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}
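
/*
 * Illustrative sketch (hypothetical caller): because the copy may be capped
 * at one page on HIGHMEM, loop on the returned byte count until the whole
 * range (which must lie within the folio) has been copied.
 *
 *   static void read_from_folio(char *to, struct folio *folio,
 *   			       loff_t pos, size_t len)
 *   {
 *   	while (len) {
 *   		size_t n = memcpy_from_file_folio(to, folio, pos, len);
 *
 *   		to += n;
 *   		pos += n;
 *   		len -= n;
 *   	}
 *   }
 */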

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, eg in directory handling, to kmap a folio.  This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}
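
/*
 * Illustrative sketch (hypothetical directory code): pairing a page cache
 * lookup plus kmap_local_folio() with folio_release_kmap(), which both
 * unmaps the folio and drops the reference taken by the lookup.
 * read_mapping_folio() and ERR_CAST() are assumed from <linux/pagemap.h>
 * and <linux/err.h>.
 *
 *   static void *dir_get_folio(struct address_space *mapping, pgoff_t index,
 *   			      struct folio **foliop)
 *   {
 *   	struct folio *folio = read_mapping_folio(mapping, index, NULL);
 *
 *   	if (IS_ERR(folio))
 *   		return ERR_CAST(folio);
 *   	*foliop = folio;
 *   	return kmap_local_folio(folio, 0);
 *   }
 *
 *   // When finished with the data:
 *   //	folio_release_kmap(*foliop, kaddr);
 */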

static inline void unmap_and_put_page(struct page *page, void *addr)
{
	folio_release_kmap(page_folio(page), addr);
}

#endif /* _LINUX_HIGHMEM_H */