Lines Matching +full:page +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0-only
14 #define KVM_PGTABLE_FIRST_LEVEL -1
18 * The largest supported block sizes for KVM (no 52-bit PA support):
19 * - 4K (level 1): 1GB
20 * - 16K (level 2): 32MB
21 * - 64K (level 2): 512MB
60 #define KVM_PHYS_INVALID (-1ULL)
99 * Used to indicate a pte for which a 'break-before-make' sequence is in
176 static inline bool kvm_is_block_size_supported(u64 size) in kvm_is_block_size_supported() argument
178 bool is_power_of_two = IS_ALIGNED(size, size); in kvm_is_block_size_supported()
180 return is_power_of_two && (size & kvm_supported_block_sizes()); in kvm_is_block_size_supported()
184 * struct kvm_pgtable_mm_ops - Memory management callbacks.
185 * @zalloc_page: Allocate a single zeroed memory page.
188 * the page is 1.
190 * The @size parameter is in bytes, and is rounded
191 * up to the next page boundary. The resulting
197 * @get_page: Increment the refcount on a page.
198 * @put_page: Decrement the refcount on a page. When the
199 * refcount reaches 0 the page is automatically
201 * @page_count: Return the refcount of a page.
213 void* (*zalloc_pages_exact)(size_t size);
214 void (*free_pages_exact)(void *addr, size_t size);
221 void (*dcache_clean_inval_poc)(void *addr, size_t size);
222 void (*icache_inval_pou)(void *addr, size_t size);
226 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
227 * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
237 * enum kvm_pgtable_prot - Page-table permissions and attributes.
277 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
284 * @KVM_PGTABLE_WALK_SHARED: Indicates the page-tables may be shared
286 * @KVM_PGTABLE_WALK_HANDLE_FAULT: Indicates the page-table walk was
289 * without Break-before-make's
322 return ctx->flags & KVM_PGTABLE_WALK_SHARED; in kvm_pgtable_walk_shared()
326 * struct kvm_pgtable_walker - Hook into a page-table walk.
329 * @flags: Bitwise-OR of flags to identify the entry types on which to
339 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
357 * non-shared table walkers are allowed in the hypervisor. in kvm_pgtable_walk_begin()
359 if (walker->flags & KVM_PGTABLE_WALK_SHARED) in kvm_pgtable_walk_begin()
360 return -EPERM; in kvm_pgtable_walk_begin()
379 return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED)); in kvm_dereference_pteref()
384 if (walker->flags & KVM_PGTABLE_WALK_SHARED) in kvm_pgtable_walk_begin()
392 if (walker->flags & KVM_PGTABLE_WALK_SHARED) in kvm_pgtable_walk_end()
404 * struct kvm_pgtable - KVM page-table.
405 * @ia_bits: Maximum input address size, in bits.
406 * @start_level: Level at which the page-table walk starts.
407 * @pgd: Pointer to the first top-level entry of the page-table.
409 * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
410 * @flags: Stage-2 page-table flags.
411 * @force_pte_cb: Function that returns true if page level mappings must
420 /* Stage-2 only */
427 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
428 * @pgt: Uninitialised page-table structure to initialise.
438 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
439 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
441 * The page-table is assumed to be unreachable by any hardware walkers prior
447 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
448 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
450 * @size: Size of the mapping.
454 * The offset of @addr within a page is ignored, @size is rounded-up to
455 * the next page boundary and @phys is rounded-down to the previous page
465 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
469 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
470 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
472 * @size: Size of the mapping.
 474  * The offset of @addr within a page is ignored and @size is rounded-up to
 475  * the next page boundary.
478 * TLB invalidation is performed for each page-table entry cleared during the
479 * unmapping operation and the reference count for the page-table page
482 * invalid page-table entry or a valid block mapping which maps beyond the range
487 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
490 * kvm_get_vtcr() - Helper to construct VTCR_EL2
506 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
509 * Return: the size (in bytes) of the stage-2 PGD
514 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
515 * @pgt: Uninitialised page-table structure to initialise.
518 * @flags: Stage-2 configuration flags.
519 * @force_pte_cb: Function that returns true if page level mappings must
533 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
534 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
536 * The page-table is assumed to be unreachable by any hardware walkers prior
542 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
544 * @pgtable: Unlinked stage-2 paging structure to be freed.
545 * @level: Level of the stage-2 paging structure to be freed.
547 * The page-table is assumed to be unreachable by any hardware walkers prior to
553 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
554 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
556 * @level: Starting level of the stage-2 paging structure to be created.
558 * @mc: Cache of pre-allocated and zeroed memory from which to allocate
559 * page-table pages.
562 * Returns an unlinked page-table tree. This new page-table tree is
 564  * therefore unreachable by the hardware page-table walker. No TLB
570 * Return: The fully populated (unlinked) stage-2 paging structure, or
579 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
580 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
582 * @size: Size of the mapping.
585 * @mc: Cache of pre-allocated and zeroed memory from which to allocate
586 * page-table pages.
587 * @flags: Flags to control the page-table walk (ex. a shared walk)
589 * The offset of @addr within a page is ignored, @size is rounded-up to
590 * the next page boundary and @phys is rounded-down to the previous page
602 * existing block mappings, relying on page-faults to fault back areas outside
607 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
612 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
614 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
616 * @size: Size of the annotated range.
617 * @mc: Cache of pre-allocated and zeroed memory from which to allocate
618 * page-table pages.
619 * @owner_id: Unique identifier for the owner of the page.
621 * By default, all page-tables are owned by identifier 0. This function can be
623 * stage 2 is used with identity-mappings, these annotations allow to use the
624 * page-table data structure as a simple rmap.
628 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
632 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
633 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
635 * @size: Size of the mapping.
637 * The offset of @addr within a page is ignored and @size is rounded-up to
638 * the next page boundary.
640 * TLB invalidation is performed for each page-table entry cleared during the
641 * unmapping operation and the reference count for the page-table page
643 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
648 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
651 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
653 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
654 * @addr: Intermediate physical address from which to write-protect,
655 * @size: Size of the range.
657 * The offset of @addr within a page is ignored and @size is rounded-up to
658 * the next page boundary.
666 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
669 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
670 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
671 * @addr: Intermediate physical address to identify the page-table entry.
673 * The offset of @addr within a page is ignored.
675 * If there is a valid, leaf page-table entry used to translate @addr, then
678 * Return: The old page-table entry prior to setting the flag, 0 on failure.
683 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
684 * flag in a page-table entry.
685 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
686 * @addr: Intermediate physical address to identify the page-table entry.
687 * @size: Size of the address range to visit.
690 * The offset of @addr within a page is ignored.
693 * page-table entry used to translate the range [@addr, @addr + @size).
702 u64 size, bool mkold);
705 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
706 * page-table entry.
707 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
708 * @addr: Intermediate physical address to identify the page-table entry.
711 * The offset of @addr within a page is ignored.
713 * If there is a valid, leaf page-table entry used to translate @addr, then
725 * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
726 * of Coherency for guest stage-2 address
728 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
730 * @size: Size of the range.
732 * The offset of @addr within a page is ignored and @size is rounded-up to
733 * the next page boundary.
737 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
740 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
742 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
744 * @size: Size of the range.
745 * @mc: Cache of pre-allocated and zeroed memory from which to allocate
746 * page-table pages.
749 * with the input range (given by @addr and @size).
755 int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
759 * kvm_pgtable_walk() - Walk a page-table.
760 * @pgt: Page-table structure initialised by kvm_pgtable_*_init().
762 * @size: Size of the range to walk.
765 * The offset of @addr within a page is ignored and @size is rounded-up to
766 * the next page boundary.
768 * The walker will walk the page-table entries corresponding to the input
770 * Invalid entries are treated as leaf entries. The visited page table entry is
779 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
783 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
785 * @pgt: Page-table structure initialised by kvm_pgtable_*_init()
791 * The offset of @addr within a page is ignored.
793 * The walker will walk the page-table entries corresponding to the input
803 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
804 * stage-2 Page-Table Entry.
805 * @pte: Page-table entry
807 * Return: protection attributes of the page-table entry in the enum
813 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
814 * Page-Table Entry.
815 * @pte: Page-table entry
817 * Return: protection attributes of the page-table entry in the enum
823 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
825 * @mmu: Stage-2 KVM MMU struct
827 * @size: Size of the range from the base to invalidate
830 phys_addr_t addr, size_t size);