Lines Matching +full:page +full:- +full:offset

1 /* SPDX-License-Identifier: GPL-2.0 */
14 #include "highmem-internal.h"
17 * kmap - Map a page for long term usage
18 * @page: Pointer to the page to be mapped
37 static inline void *kmap(struct page *page);
40 * kunmap - Unmap the virtual address mapped by kmap()
41 * @page: Pointer to the page which was mapped by kmap()
46 static inline void kunmap(struct page *page);
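A minimal usage sketch, not part of this header (kernel context, <linux/highmem.h> included): kmap() may sleep and the mapping persists until kunmap() is called on the same page, so it suits code that must keep the page mapped across a blocking operation. do_sleeping_checksum() is a hypothetical callee.

static int checksum_page_slowly(struct page *page, u32 *sum)
{
	void *vaddr = kmap(page);	/* may sleep; mapping persists until kunmap() */
	int ret;

	ret = do_sleeping_checksum(vaddr, PAGE_SIZE, sum);	/* hypothetical helper */
	kunmap(page);			/* unmapped by page, not by address */
	return ret;
}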
49 * kmap_to_page - Get the page for a kmap'ed address
52 * Returns: The page which is mapped to @addr.
54 static inline struct page *kmap_to_page(void *addr);
 57  * kmap_flush_unused - Flush all unused kmap mappings in order to remove stray mappings
63 * kmap_local_page - Map a page for temporary usage
64 * @page: Pointer to the page to be mapped
92 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
96 static inline void *kmap_local_page(struct page *page);
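A hedged sketch of the temporary-mapping pattern; the pattern-fill helper is illustrative only:

static void fill_page_with_pattern(struct page *page, u8 pattern)
{
	u8 *vaddr = kmap_local_page(page);	/* valid only on this CPU until unmapped */

	memset(vaddr, pattern, PAGE_SIZE);
	kunmap_local(vaddr);			/* unmap by address, in LIFO order */
}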
99 * kmap_local_folio - Map a page in this folio for temporary usage
100 * @folio: The folio containing the page.
101 * @offset: The byte offset within the folio which identifies the page.
125 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
130 * Return: The virtual address of @offset.
132 static inline void *kmap_local_folio(struct folio *folio, size_t offset);
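A sketch of kmap_local_folio() use; only the single page containing @offset is mapped, so the access below is kept within that page:

static void zero_rest_of_page_in_folio(struct folio *folio, size_t offset)
{
	void *vaddr = kmap_local_folio(folio, offset);	/* address of @offset */

	memset(vaddr, 0, PAGE_SIZE - offset_in_page(offset));
	kunmap_local(vaddr);
}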
135 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
136 * @page: Pointer to the page to be mapped
149 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
150  * example a page in the pagecache. The API has two functions, and they can be used in a manner similar to the following:
153 * // Find the page of interest.
154 * struct page *page = find_get_page(mapping, offset);
156 * // Gain access to the contents of that page.
157 * void *vaddr = kmap_atomic(page);
159 * // Do something to the contents of that page.
162 * // Unmap that page.
168 * If you need to map two pages because you want to copy from one page to
179 static inline void *kmap_atomic(struct page *page);
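Because kmap_atomic() is deprecated, here is a hedged sketch of the same pattern the kernel-doc above describes, written with kmap_local_page() instead; @mapping and @index come from the caller:

static void zero_pagecache_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);	/* takes a reference */
	void *vaddr;

	if (!page)
		return;
	vaddr = kmap_local_page(page);	/* preferred replacement for kmap_atomic() */
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_local(vaddr);
	flush_dcache_page(page);
	put_page(page);
}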
186 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma… in flush_anon_page() argument
202 static inline void clear_user_highpage(struct page *page, unsigned long vaddr) in clear_user_highpage() argument
204 void *addr = kmap_local_page(page); in clear_user_highpage()
205 clear_user_page(addr, vaddr, page); in clear_user_highpage()
212 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
213 * @vma: The VMA the page is to be allocated for.
214 * @vaddr: The virtual address the page will be inserted into.
216  * This function will allocate a page suitable for inserting into this VMA at this virtual address.
220  * Return: A folio containing one allocated and zeroed page, or NULL if we are out of memory.
231 clear_user_highpage(&folio->page, vaddr); in vma_alloc_zeroed_movable_folio()
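An illustrative (hypothetical) caller, roughly how an anonymous-fault path would use it:

static struct folio *alloc_zeroed_folio_for_fault(struct vm_fault *vmf)
{
	/* Returns a zeroed, movable folio for vmf->address, or NULL on OOM. */
	return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
}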
237 static inline void clear_highpage(struct page *page) in clear_highpage() argument
239 void *kaddr = kmap_local_page(page); in clear_highpage()
244 static inline void clear_highpage_kasan_tagged(struct page *page) in clear_highpage_kasan_tagged() argument
246 void *kaddr = kmap_local_page(page); in clear_highpage_kasan_tagged()
254 static inline void tag_clear_highpage(struct page *page) in tag_clear_highpage() argument
261 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
262 * If we pass in a head page, we can zero up to the size of the compound page.
265 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
268 static inline void zero_user_segments(struct page *page, in zero_user_segments() argument
272 void *kaddr = kmap_local_page(page); in zero_user_segments()
275 BUG_ON(end1 > page_size(page) || end2 > page_size(page)); in zero_user_segments()
278 memset(kaddr + start1, 0, end1 - start1); in zero_user_segments()
281 memset(kaddr + start2, 0, end2 - start2); in zero_user_segments()
284 for (i = 0; i < compound_nr(page); i++) in zero_user_segments()
285 flush_dcache_page(page + i); in zero_user_segments()
289 static inline void zero_user_segment(struct page *page, in zero_user_segment() argument
292 zero_user_segments(page, start, end, 0, 0); in zero_user_segment()
295 static inline void zero_user(struct page *page, in zero_user() argument
298 zero_user_segments(page, start, start + size, 0, 0); in zero_user()
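A sketch showing the two-range form: it zeroes everything in @page outside the byte range [keep_start, keep_end):

static void zero_outside_range(struct page *page, unsigned keep_start,
			       unsigned keep_end)
{
	zero_user_segments(page, 0, keep_start, keep_end, PAGE_SIZE);
}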
303 static inline void copy_user_highpage(struct page *to, struct page *from, in copy_user_highpage()
320 static inline void copy_highpage(struct page *to, struct page *from) in copy_highpage()
338  * page with #MC in source page (@from) handled, and return the number of bytes not copied if there was a #MC, otherwise 0 for success.
341 static inline int copy_mc_user_highpage(struct page *to, struct page *from, in copy_mc_user_highpage()
361 static inline int copy_mc_highpage(struct page *to, struct page *from) in copy_mc_highpage()
380 static inline int copy_mc_user_highpage(struct page *to, struct page *from, in copy_mc_user_highpage()
387 static inline int copy_mc_highpage(struct page *to, struct page *from) in copy_mc_highpage()
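A hedged sketch: callers treat the return value the same whether or not the architecture provides the #MC-aware version (0 on success, bytes not copied on a machine check); the error mapping below is illustrative, not mandated by this header:

static int copy_page_checked(struct page *dst, struct page *src)
{
	int residue = copy_mc_highpage(dst, src);

	return residue ? -EHWPOISON : 0;	/* illustrative error mapping */
}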
394 static inline void memcpy_page(struct page *dst_page, size_t dst_off, in memcpy_page()
395 struct page *src_page, size_t src_off, in memcpy_page()
407 static inline void memset_page(struct page *page, size_t offset, int val, in memset_page() argument
410 char *addr = kmap_local_page(page); in memset_page()
412 VM_BUG_ON(offset + len > PAGE_SIZE); in memset_page()
413 memset(addr + offset, val, len); in memset_page()
417 static inline void memcpy_from_page(char *to, struct page *page, in memcpy_from_page() argument
418 size_t offset, size_t len) in memcpy_from_page() argument
420 char *from = kmap_local_page(page); in memcpy_from_page()
422 VM_BUG_ON(offset + len > PAGE_SIZE); in memcpy_from_page()
423 memcpy(to, from + offset, len); in memcpy_from_page()
427 static inline void memcpy_to_page(struct page *page, size_t offset, in memcpy_to_page() argument
430 char *to = kmap_local_page(page); in memcpy_to_page()
432 VM_BUG_ON(offset + len > PAGE_SIZE); in memcpy_to_page()
433 memcpy(to + offset, from, len); in memcpy_to_page()
434 flush_dcache_page(page); in memcpy_to_page()
438 static inline void memzero_page(struct page *page, size_t offset, size_t len) in memzero_page() argument
440 char *addr = kmap_local_page(page); in memzero_page()
442 VM_BUG_ON(offset + len > PAGE_SIZE); in memzero_page()
443 memset(addr + offset, 0, len); in memzero_page()
444 flush_dcache_page(page); in memzero_page()
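A round-trip sketch of the byte-level helpers above; it works whether or not @page is in highmem:

static void page_copy_roundtrip(struct page *page)
{
	char in[16] = "highmem example";
	char out[16];

	memcpy_to_page(page, 0, in, sizeof(in));	/* also flushes the dcache */
	memcpy_from_page(out, page, 0, sizeof(out));
	memzero_page(page, 0, sizeof(in));
}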
449 * memcpy_from_folio - Copy a range of bytes from a folio.
452 * @offset: The first byte in the folio to read.
456 size_t offset, size_t len) in memcpy_from_folio() argument
458 VM_BUG_ON(offset + len > folio_size(folio)); in memcpy_from_folio()
461 const char *from = kmap_local_folio(folio, offset); in memcpy_from_folio()
465 chunk > PAGE_SIZE - offset_in_page(offset)) in memcpy_from_folio()
466 chunk = PAGE_SIZE - offset_in_page(offset); in memcpy_from_folio()
471 offset += chunk; in memcpy_from_folio()
472 len -= chunk; in memcpy_from_folio()
477 * memcpy_to_folio - Copy a range of bytes to a folio.
479 * @offset: The first byte in the folio to store to.
483 static inline void memcpy_to_folio(struct folio *folio, size_t offset, in memcpy_to_folio() argument
486 VM_BUG_ON(offset + len > folio_size(folio)); in memcpy_to_folio()
489 char *to = kmap_local_folio(folio, offset); in memcpy_to_folio()
493 chunk > PAGE_SIZE - offset_in_page(offset)) in memcpy_to_folio()
494 chunk = PAGE_SIZE - offset_in_page(offset); in memcpy_to_folio()
499 offset += chunk; in memcpy_to_folio()
500 len -= chunk; in memcpy_to_folio()
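A sketch combining both folio copy helpers to move bytes between two folios through a small bounce buffer; both helpers handle ranges that cross page boundaries within a folio:

static void copy_between_folios(struct folio *dst, struct folio *src,
				size_t offset, size_t len)
{
	char buf[64];

	while (len) {
		size_t chunk = min(len, sizeof(buf));

		memcpy_from_folio(buf, src, offset, chunk);
		memcpy_to_folio(dst, offset, buf, chunk);
		offset += chunk;
		len -= chunk;
	}
}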
507 * folio_zero_tail - Zero the tail of a folio.
509 * @offset: The byte offset in the folio to start zeroing at.
521 size_t offset, void *kaddr) in folio_zero_tail() argument
523 size_t len = folio_size(folio) - offset; in folio_zero_tail()
526 size_t max = PAGE_SIZE - offset_in_page(offset); in folio_zero_tail()
531 len -= max; in folio_zero_tail()
532 offset += max; in folio_zero_tail()
534 kaddr = kmap_local_folio(folio, offset); in folio_zero_tail()
545 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
547 * @offset: The offset into @folio at which to start copying.
552 * When they want to copy data from the inode into the page cache, this
556 static inline void folio_fill_tail(struct folio *folio, size_t offset, in folio_fill_tail() argument
559 char *to = kmap_local_folio(folio, offset); in folio_fill_tail()
561 VM_BUG_ON(offset + len > folio_size(folio)); in folio_fill_tail()
564 size_t max = PAGE_SIZE - offset_in_page(offset); in folio_fill_tail()
569 len -= max; in folio_fill_tail()
571 offset += max; in folio_fill_tail()
573 to = kmap_local_folio(folio, offset); in folio_fill_tail()
578 to = folio_zero_tail(folio, offset + len, to + len); in folio_fill_tail()
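A hedged sketch of the inline-data pattern the kernel-doc describes; ifs_read_inline_folio(), ifs_inline_data() and ifs_inline_size() are hypothetical filesystem helpers, not real APIs:

static void ifs_read_inline_folio(struct inode *inode, struct folio *folio)
{
	folio_fill_tail(folio, 0, ifs_inline_data(inode), ifs_inline_size(inode));
	folio_mark_uptodate(folio);
}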
583 * memcpy_from_file_folio - Copy some bytes from a file folio.
597 size_t offset = offset_in_folio(folio, pos); in memcpy_from_file_folio() local
598 char *from = kmap_local_folio(folio, offset); in memcpy_from_file_folio()
601 offset = offset_in_page(offset); in memcpy_from_file_folio()
602 len = min_t(size_t, len, PAGE_SIZE - offset); in memcpy_from_file_folio()
604 len = min(len, folio_size(folio) - offset); in memcpy_from_file_folio()
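A usage sketch: the helper may copy fewer than @len bytes (it stops at a page or folio boundary), so callers loop on the returned count; this assumes the whole range lies within @folio:

static void read_bytes_from_file_folio(char *dst, struct folio *folio,
				       loff_t pos, size_t len)
{
	while (len) {
		size_t n = memcpy_from_file_folio(dst, folio, pos, len);

		dst += n;
		pos += n;
		len -= n;
	}
}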
613 * folio_zero_segments() - Zero two byte ranges in a folio.
623 zero_user_segments(&folio->page, start1, xend1, start2, xend2); in folio_zero_segments()
627 * folio_zero_segment() - Zero a byte range in a folio.
635 zero_user_segments(&folio->page, start, xend, 0, 0); in folio_zero_segment()
639 * folio_zero_range() - Zero a byte range in a folio.
647 zero_user_segments(&folio->page, start, start + length, 0, 0); in folio_zero_range()
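A sketch of a typical caller: zeroing the part of a folio beyond EOF, assuming @isize falls within @folio:

static void zero_folio_beyond_eof(struct folio *folio, loff_t isize)
{
	size_t offset = offset_in_folio(folio, isize);

	folio_zero_range(folio, offset, folio_size(folio) - offset);
}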
651 * folio_release_kmap - Unmap a folio and drop a refcount.
665 static inline void unmap_and_put_page(struct page *page, void *addr) in unmap_and_put_page() argument
667 folio_release_kmap(page_folio(page), addr); in unmap_and_put_page()
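A sketch of the map/use/release pattern these helpers close out: the caller holds a folio reference, maps it with kmap_local_folio(), and folio_release_kmap() both unmaps and drops that reference:

static u8 read_first_byte(struct folio *folio)
{
	u8 *kaddr = kmap_local_folio(folio, 0);
	u8 val = kaddr[0];

	folio_release_kmap(folio, kaddr);	/* kunmap_local() + folio_put() */
	return val;
}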