Lines matching "+full:guest +full:-index +full:-bits" in arch/s390/mm/gmap.c (Linux kernel)

1 // SPDX-License-Identifier: GPL-2.0
3 * KVM guest address space mapping code
21 #include <asm/page-states.h>
41 * gmap_alloc - allocate and initialize a guest address space
44 * Returns a guest address space structure.
54 limit = _REGION3_SIZE - 1; in gmap_alloc()
58 limit = _REGION2_SIZE - 1; in gmap_alloc()
62 limit = _REGION1_SIZE - 1; in gmap_alloc()
66 limit = -1UL; in gmap_alloc()
73 INIT_LIST_HEAD(&gmap->crst_list); in gmap_alloc()
74 INIT_LIST_HEAD(&gmap->children); in gmap_alloc()
75 INIT_LIST_HEAD(&gmap->pt_list); in gmap_alloc()
76 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT); in gmap_alloc()
77 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT); in gmap_alloc()
78 INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT); in gmap_alloc()
79 spin_lock_init(&gmap->guest_table_lock); in gmap_alloc()
80 spin_lock_init(&gmap->shadow_lock); in gmap_alloc()
81 refcount_set(&gmap->ref_count, 1); in gmap_alloc()
85 page->index = 0; in gmap_alloc()
86 list_add(&page->lru, &gmap->crst_list); in gmap_alloc()
89 gmap->table = table; in gmap_alloc()
90 gmap->asce = atype | _ASCE_TABLE_LENGTH | in gmap_alloc()
92 gmap->asce_end = limit; in gmap_alloc()
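
The branches above pick the smallest top-level table type that covers the requested limit. A condensed view of the mapping, derived from the s390 region sizes (my summary, not part of the listing):

	/*
	 * limit <  2 GB (_REGION3_SIZE) -> segment table,  asce_end  2 GB - 1
	 * limit <  4 TB (_REGION2_SIZE) -> region-3 table, asce_end  4 TB - 1
	 * limit <  8 PB (_REGION1_SIZE) -> region-2 table, asce_end  8 PB - 1
	 * otherwise                     -> region-1 table, asce_end  -1UL
	 */
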
102 * gmap_create - create a guest address space
106 * Returns a guest address space structure.
116 gmap->mm = mm; in gmap_create()
117 spin_lock(&mm->context.lock); in gmap_create()
118 list_add_rcu(&gmap->list, &mm->context.gmap_list); in gmap_create()
119 if (list_is_singular(&mm->context.gmap_list)) in gmap_create()
120 gmap_asce = gmap->asce; in gmap_create()
122 gmap_asce = -1UL; in gmap_create()
123 WRITE_ONCE(mm->context.gmap_asce, gmap_asce); in gmap_create()
124 spin_unlock(&mm->context.lock); in gmap_create()
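
A minimal lifecycle sketch for these entry points, modelled on how kvm-s390 sets up its VM gmap; "mem_limit" is a hypothetical highest guest address plus one:

	struct gmap *g;

	g = gmap_create(current->mm, mem_limit - 1);
	if (!g)
		return -ENOMEM;
	/* ... gmap_map_segment(), gmap_fault(), run the guest ... */
	gmap_remove(g);	/* unlinks from the mm and drops the creation reference */
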
132 __tlb_flush_idte(gmap->asce); in gmap_flush_tlb()
141 unsigned long index; in gmap_radix_tree_free() local
146 index = 0; in gmap_radix_tree_free()
149 radix_tree_for_each_slot(slot, root, &iter, index) { in gmap_radix_tree_free()
150 indices[nr] = iter.index; in gmap_radix_tree_free()
155 index = indices[i]; in gmap_radix_tree_free()
156 radix_tree_delete(root, index); in gmap_radix_tree_free()
166 unsigned long index; in gmap_rmap_radix_tree_free() local
171 index = 0; in gmap_rmap_radix_tree_free()
174 radix_tree_for_each_slot(slot, root, &iter, index) { in gmap_rmap_radix_tree_free()
175 indices[nr] = iter.index; in gmap_rmap_radix_tree_free()
180 index = indices[i]; in gmap_rmap_radix_tree_free()
181 head = radix_tree_delete(root, index); in gmap_rmap_radix_tree_free()
189 * gmap_free - free a guest address space
190 * @gmap: pointer to the guest address space structure
199 if (!(gmap_is_shadow(gmap) && gmap->removed)) in gmap_free()
202 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) in gmap_free()
204 gmap_radix_tree_free(&gmap->guest_to_host); in gmap_free()
205 gmap_radix_tree_free(&gmap->host_to_guest); in gmap_free()
212 list_for_each_entry_safe(ptdesc, n, &gmap->pt_list, pt_list) in gmap_free()
214 gmap_rmap_radix_tree_free(&gmap->host_to_rmap); in gmap_free()
216 gmap_put(gmap->parent); in gmap_free()
223 * gmap_get - increase reference counter for guest address space
224 * @gmap: pointer to the guest address space structure
230 refcount_inc(&gmap->ref_count); in gmap_get()
236 * gmap_put - decrease reference counter for guest address space
237 * @gmap: pointer to the guest address space structure
239 * If the reference counter reaches zero the guest address space is freed.
243 if (refcount_dec_and_test(&gmap->ref_count)) in gmap_put()
249 * gmap_remove - remove a guest address space but do not free it yet
250 * @gmap: pointer to the guest address space structure
258 if (!list_empty(&gmap->children)) { in gmap_remove()
259 spin_lock(&gmap->shadow_lock); in gmap_remove()
260 list_for_each_entry_safe(sg, next, &gmap->children, list) { in gmap_remove()
261 list_del(&sg->list); in gmap_remove()
264 spin_unlock(&gmap->shadow_lock); in gmap_remove()
266 /* Remove gmap from the per-mm list */ in gmap_remove()
267 spin_lock(&gmap->mm->context.lock); in gmap_remove()
268 list_del_rcu(&gmap->list); in gmap_remove()
269 if (list_empty(&gmap->mm->context.gmap_list)) in gmap_remove()
271 else if (list_is_singular(&gmap->mm->context.gmap_list)) in gmap_remove()
272 gmap_asce = list_first_entry(&gmap->mm->context.gmap_list, in gmap_remove()
273 struct gmap, list)->asce; in gmap_remove()
275 gmap_asce = -1UL; in gmap_remove()
276 WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce); in gmap_remove()
277 spin_unlock(&gmap->mm->context.lock); in gmap_remove()
285 * gmap_enable - switch primary space to the guest address space
286 * @gmap: pointer to the guest address space structure
290 get_lowcore()->gmap = (unsigned long)gmap; in gmap_enable()
295 * gmap_disable - switch back to the standard primary address space
296 * @gmap: pointer to the guest address space structure
300 get_lowcore()->gmap = 0UL; in gmap_disable()
305 * gmap_get_enabled - get a pointer to the currently enabled gmap
311 return (struct gmap *)get_lowcore()->gmap; in gmap_get_enabled()
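
Sketch of the load/put pattern kvm-s390 wraps around these helpers; vcpu->arch.enabled_gmap is the field kvm-s390 uses, the surrounding flow is illustrative:

	/* kvm_arch_vcpu_load(): make the vcpu's gmap current on this cpu */
	gmap_enable(vcpu->arch.enabled_gmap);
	/* ... enter SIE ... */
	/* kvm_arch_vcpu_put(): remember what was enabled, then switch back */
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
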
327 return -ENOMEM; in gmap_alloc_table()
330 spin_lock(&gmap->guest_table_lock); in gmap_alloc_table()
332 list_add(&page->lru, &gmap->crst_list); in gmap_alloc_table()
335 page->index = gaddr; in gmap_alloc_table()
338 spin_unlock(&gmap->guest_table_lock); in gmap_alloc_table()
345 * __gmap_segment_gaddr - find virtual address from segment pointer
346 * @entry: pointer to a segment table entry in the guest address space
348 * Returns the virtual address in the guest address space for the segment
356 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE; in __gmap_segment_gaddr()
358 return page->index + offset; in __gmap_segment_gaddr()
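
A worked example for the computation above (values hypothetical): page->index records the guest address covered by the segment-table page, so for an entry in slot 5 of a table with page->index == 0x80000000, with PMD_SIZE == 1 MB on s390:

	/* 0x80000000 + 5 * 0x100000 == 0x80500000 */
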
362 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
363 * @gmap: pointer to the guest address space structure
374 spin_lock(&gmap->guest_table_lock); in __gmap_unlink_by_vmaddr()
375 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT); in __gmap_unlink_by_vmaddr()
380 spin_unlock(&gmap->guest_table_lock); in __gmap_unlink_by_vmaddr()
385 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
386 * @gmap: pointer to the guest address space structure
387 * @gaddr: address in the guest address space
395 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host, in __gmap_unmap_by_gaddr()
401 * gmap_unmap_segment - unmap segment from the guest address space
402 * @gmap: pointer to the guest address space structure
403 * @to: address in the guest address space
406 * Returns 0 if the unmap succeeded, -EINVAL if not.
414 if ((to | len) & (PMD_SIZE - 1)) in gmap_unmap_segment()
415 return -EINVAL; in gmap_unmap_segment()
417 return -EINVAL; in gmap_unmap_segment()
420 mmap_write_lock(gmap->mm); in gmap_unmap_segment()
423 mmap_write_unlock(gmap->mm); in gmap_unmap_segment()
431 * gmap_map_segment - map a segment to the guest address space
432 * @gmap: pointer to the guest address space structure
434 * @to: target address in the guest address space
437 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
446 if ((from | to | len) & (PMD_SIZE - 1)) in gmap_map_segment()
447 return -EINVAL; in gmap_map_segment()
449 from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end) in gmap_map_segment()
450 return -EINVAL; in gmap_map_segment()
453 mmap_write_lock(gmap->mm); in gmap_map_segment()
458 if (radix_tree_insert(&gmap->guest_to_host, in gmap_map_segment()
463 mmap_write_unlock(gmap->mm); in gmap_map_segment()
469 return -ENOMEM; in gmap_map_segment()
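
Usage sketch for this pair of calls ("from" is a hypothetical, 1 MB aligned host address; SZ_256M comes from <linux/sizes.h>). All addresses and the length must be segment (1 MB) aligned, otherwise -EINVAL is returned:

	rc = gmap_map_segment(g, from, 0, SZ_256M);	/* guest 0..256 MB -> host "from" */
	if (rc)
		return rc;
	/* ... */
	rc = gmap_unmap_segment(g, 0, SZ_256M);		/* tear the range down again */
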
474 * __gmap_translate - translate a guest address to a user space address
475 * @gmap: pointer to guest mapping meta data structure
476 * @gaddr: guest address
478 * Returns user space address which corresponds to the guest address or
479 * -EFAULT if no such mapping exists.
491 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT); in __gmap_translate()
493 return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT; in __gmap_translate()
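
A worked example for the return expression (values hypothetical): with PMD_SHIFT == 20, gaddr == 0x20123456 looks up key 0x201 in guest_to_host; if that slot holds the segment-aligned host address 0x3ffae00000, the result is:

	/* 0x3ffae00000 | (0x20123456 & 0xfffff) == 0x3ffae23456 */
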
498 * gmap_translate - translate a guest address to a user space address
499 * @gmap: pointer to guest mapping meta data structure
500 * @gaddr: guest address
502 * Returns user space address which corresponds to the guest address or
503 * -EFAULT if no such mapping exists.
510 mmap_read_lock(gmap->mm); in gmap_translate()
512 mmap_read_unlock(gmap->mm); in gmap_translate()
518 * gmap_unlink - disconnect a page table from the gmap shadow tables
530 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { in gmap_unlink()
542 * __gmap_link - set up shadow page tables to connect a host to a guest address
543 * @gmap: pointer to guest mapping meta data structure
544 * @gaddr: guest address
547 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
548 * if the vm address is already mapped to a different guest segment.
566 table = gmap->table; in __gmap_link()
567 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) { in __gmap_link()
572 return -ENOMEM; in __gmap_link()
575 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) { in __gmap_link()
580 return -ENOMEM; in __gmap_link()
583 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) { in __gmap_link()
588 return -ENOMEM; in __gmap_link()
593 mm = gmap->mm; in __gmap_link()
602 return -EFAULT; in __gmap_link()
606 if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m) in __gmap_link()
607 return -EFAULT; in __gmap_link()
613 spin_lock(&gmap->guest_table_lock); in __gmap_link()
615 rc = radix_tree_insert(&gmap->host_to_guest, in __gmap_link()
633 spin_unlock(&gmap->guest_table_lock); in __gmap_link()
640 * gmap_fault - resolve a fault on a guest address
641 * @gmap: pointer to guest mapping meta data structure
642 * @gaddr: guest address
645 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
646 * if the vm address is already mapped to a different guest segment.
655 mmap_read_lock(gmap->mm); in gmap_fault()
664 if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, in gmap_fault()
666 rc = -EFAULT; in gmap_fault()
678 mmap_read_unlock(gmap->mm); in gmap_fault()
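
Usage sketch of a fault fixup, as kvm-s390 issues it after a host page fault intercept; the fault_flags follow fixup_user_fault() semantics:

	rc = gmap_fault(vcpu->arch.gmap, gaddr, FAULT_FLAG_WRITE);
	if (rc == -EFAULT)
		return rc;	/* gaddr is not backed by the host address space */
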
693 /* Find the vm address for the guest address */ in __gmap_zap()
694 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host, in __gmap_zap()
699 vma = vma_lookup(gmap->mm, vmaddr); in __gmap_zap()
704 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
706 ptep_zap_unused(gmap->mm, vmaddr, ptep, 0); in __gmap_zap()
718 mmap_read_lock(gmap->mm); in gmap_discard()
721 /* Find the vm address for the guest address */ in gmap_discard()
723 radix_tree_lookup(&gmap->guest_to_host, in gmap_discard()
729 vma = find_vma(gmap->mm, vmaddr); in gmap_discard()
738 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); in gmap_discard()
741 mmap_read_unlock(gmap->mm); in gmap_discard()
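
Usage sketch: kvm-s390's diag 0x10 ("release pages") handler discards a guest range this way so the backing host pages can be reclaimed (the range variables are illustrative):

	gmap_discard(vcpu->arch.gmap, start_gaddr, end_gaddr);
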
749 * gmap_register_pte_notifier - register a pte invalidation callback
755 list_add_rcu(&nb->list, &gmap_notifier_list); in gmap_register_pte_notifier()
761 * gmap_unregister_pte_notifier - remove a pte invalidation callback
767 list_del_rcu(&nb->list); in gmap_unregister_pte_notifier()
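
A registration sketch: struct gmap_notifier and the register/unregister calls are the real API, the handler body is illustrative:

	static void my_gmap_notifier(struct gmap *gmap, unsigned long start,
				     unsigned long end)
	{
		/* e.g. kick vcpus whose prefix pages intersect [start, end] */
	}

	static struct gmap_notifier my_nb = {
		.notifier_call = my_gmap_notifier,
	};

	gmap_register_pte_notifier(&my_nb);	/* at init */
	gmap_unregister_pte_notifier(&my_nb);	/* at exit */
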
774 * gmap_call_notifier - call all registered invalidation callbacks
775 * @gmap: pointer to guest mapping meta data structure
776 * @start: start virtual address in the guest address space
777 * @end: end virtual address in the guest address space
785 nb->notifier_call(gmap, start, end); in gmap_call_notifier()
789 * gmap_table_walk - walk the gmap page tables
790 * @gmap: pointer to guest mapping meta data structure
791 * @gaddr: virtual address in the guest address space
794 * Returns a table entry pointer for the given guest address and @level
797 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
798 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
799 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
809 const int asce_type = gmap->asce & _ASCE_TYPE_MASK; in gmap_table_walk()
810 unsigned long *table = gmap->table; in gmap_table_walk()
812 if (gmap_is_shadow(gmap) && gmap->removed) in gmap_table_walk()
819 gaddr & (-1UL << (31 + (asce_type >> 2) * 11))) in gmap_table_walk()
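
A reading of the range check on source line 819 (my annotation, not from the file):

	/*
	 * asce_type >> 2 maps segment/region-3/region-2 ASCEs to 0/1/2, so
	 * 31 + (asce_type >> 2) * 11 is the number of guest address bits the
	 * top-level table covers (31, 42 or 53); any gaddr bit at or above
	 * that position makes the walk return NULL.  A guard on the
	 * preceding source line (not part of this listing) skips the check
	 * for region-1 ASCEs, which cover the full 64-bit address space.
	 */
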
860 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
862 * @gmap: pointer to guest mapping meta data structure
863 * @gaddr: virtual address in the guest address space
866 * Returns a pointer to the locked pte for a guest address, or NULL
878 return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl); in gmap_pte_op_walk()
882 * gmap_pte_op_fixup - force a page in and connect the gmap page table
883 * @gmap: pointer to guest mapping meta data structure
884 * @gaddr: virtual address in the guest address space
889 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
895 struct mm_struct *mm = gmap->mm; in gmap_pte_op_fixup()
902 return -EFAULT; in gmap_pte_op_fixup()
911 * gmap_pte_op_end - release the page table lock
921 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
923 * @gmap: pointer to guest mapping meta data structure
924 * @gaddr: virtual address in the guest address space
926 * Returns a pointer to the pmd for a guest address, or NULL
938 if (!gmap->mm->context.allow_gmap_hpage_1m) in gmap_pmd_op_walk()
941 spin_lock(&gmap->guest_table_lock); in gmap_pmd_op_walk()
943 spin_unlock(&gmap->guest_table_lock); in gmap_pmd_op_walk()
949 spin_unlock(&gmap->guest_table_lock); in gmap_pmd_op_walk()
954 * gmap_pmd_op_end - release the guest_table_lock if needed
955 * @gmap: pointer to the guest mapping meta data structure
961 spin_unlock(&gmap->guest_table_lock); in gmap_pmd_op_end()
965 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
968 * @bits: notification bits to set
972 * -EAGAIN if a fixup is needed
973 * -EINVAL if unsupported notifier bits have been specified
975 * Expected to be called with sg->mm->mmap_lock in read and
979 pmd_t *pmdp, int prot, unsigned long bits) in gmap_protect_pmd() argument
987 return -EAGAIN; in gmap_protect_pmd()
1000 if (bits & GMAP_NOTIFY_MPROT) in gmap_protect_pmd()
1004 if (bits & GMAP_NOTIFY_SHADOW) in gmap_protect_pmd()
1005 return -EINVAL; in gmap_protect_pmd()
1011 * gmap_protect_pte - remove access rights to memory and set pgste bits
1012 * @gmap: pointer to guest mapping meta data structure
1013 * @gaddr: virtual address in the guest address space
1016 * @bits: notification bits to set
1018 * Returns 0 if successfully protected, -ENOMEM if out of memory and
1019 * -EAGAIN if a fixup is needed.
1021 * Expected to be called with sg->mm->mmap_lock in read
1024 pmd_t *pmdp, int prot, unsigned long bits) in gmap_protect_pte() argument
1032 return -EAGAIN; in gmap_protect_pte()
1034 ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl); in gmap_protect_pte()
1036 return -ENOMEM; in gmap_protect_pte()
1038 pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0; in gmap_protect_pte()
1039 pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0; in gmap_protect_pte()
1041 rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits); in gmap_protect_pte()
1047 * gmap_protect_range - remove access rights to memory and set pgste bits
1048 * @gmap: pointer to guest mapping meta data structure
1049 * @gaddr: virtual address in the guest address space
1052 * @bits: pgste notification bits to set
1054 * Returns 0 if successfully protected, -ENOMEM if out of memory and
1055 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
1057 * Called with sg->mm->mmap_lock in read.
1060 unsigned long len, int prot, unsigned long bits) in gmap_protect_range() argument
1068 rc = -EAGAIN; in gmap_protect_range()
1073 bits); in gmap_protect_range()
1075 len -= PAGE_SIZE; in gmap_protect_range()
1080 bits); in gmap_protect_range()
1082 dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK); in gmap_protect_range()
1083 len = len < dist ? 0 : len - dist; in gmap_protect_range()
1090 if (rc == -EINVAL) in gmap_protect_range()
1093 /* -EAGAIN, fixup of userspace mm and gmap */ in gmap_protect_range()
1106 * gmap_mprotect_notify - change access rights for a range of ptes and
1108 * @gmap: pointer to guest mapping meta data structure
1109 * @gaddr: virtual address in the guest address space
1115 * If the gmap mapping is missing for one or more pages -EFAULT is
1116 * returned. If no memory could be allocated -ENOMEM is returned.
1125 return -EINVAL; in gmap_mprotect_notify()
1127 return -EINVAL; in gmap_mprotect_notify()
1128 mmap_read_lock(gmap->mm); in gmap_mprotect_notify()
1130 mmap_read_unlock(gmap->mm); in gmap_mprotect_notify()
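
Usage sketch, mirroring how kvm-s390 guards the 8 KB prefix area: the pages stay writable, but the notification bit is set so any invalidation fires the registered notifier ("prefix_gaddr" is hypothetical):

	rc = gmap_mprotect_notify(vcpu->arch.gmap, prefix_gaddr,
				  2 * PAGE_SIZE, PROT_WRITE);
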
1136 * gmap_read_table - get an unsigned long value from a guest page table using
1138 * @gmap: pointer to guest mapping meta data structure
1139 * @gaddr: virtual address in the guest address space
1142 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
1143 * if reading using the virtual address failed. -EINVAL if called on a gmap
1146 * Called with gmap->mm->mmap_lock in read.
1156 return -EINVAL; in gmap_read_table()
1159 rc = -EAGAIN; in gmap_read_table()
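
Usage sketch, as the VSIE guest-access code reads DAT-table entries through the parent gmap ("parent" and "gaddr" are hypothetical locals):

	unsigned long entry;

	rc = gmap_read_table(parent, gaddr, &entry);
	if (rc)
		return rc;	/* -EFAULT, -ENOMEM, or -EINVAL on a shadow gmap */
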
1189 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
1190 * @sg: pointer to the shadow guest address space structure
1194 * Called with the sg->guest_table_lock
1203 slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT); in gmap_insert_rmap()
1205 rmap->next = radix_tree_deref_slot_protected(slot, in gmap_insert_rmap()
1206 &sg->guest_table_lock); in gmap_insert_rmap()
1207 for (temp = rmap->next; temp; temp = temp->next) { in gmap_insert_rmap()
1208 if (temp->raddr == rmap->raddr) { in gmap_insert_rmap()
1213 radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap); in gmap_insert_rmap()
1215 rmap->next = NULL; in gmap_insert_rmap()
1216 radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT, in gmap_insert_rmap()
1222 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
1223 * @sg: pointer to the shadow guest address space structure
1225 * @paddr: address in the parent guest address space
1228 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
1229 * if out of memory and -EFAULT if paddr is invalid.
1242 parent = sg->parent; in gmap_protect_rmap()
1249 return -ENOMEM; in gmap_protect_rmap()
1250 rmap->raddr = raddr; in gmap_protect_rmap()
1256 rc = -EAGAIN; in gmap_protect_rmap()
1259 spin_lock(&sg->guest_table_lock); in gmap_protect_rmap()
1260 rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ, in gmap_protect_rmap()
1264 spin_unlock(&sg->guest_table_lock); in gmap_protect_rmap()
1276 len -= PAGE_SIZE; in gmap_protect_rmap()
1289 * gmap_idte_one - invalidate a single region or segment table entry
1290 * @asce: region or segment table *origin* + table-type bits
1295 * The table-type of the @asce identifies the portion of the @vaddr
1296 * that is used as the invalidation index.
1306 * gmap_unshadow_page - remove a page from a shadow page table
1307 * @sg: pointer to the shadow guest address space structure
1308 * @raddr: rmap address in the shadow guest address space
1310 * Called with the sg->guest_table_lock
1320 gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1); in gmap_unshadow_page()
1321 ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table); in gmap_unshadow_page()
1325 * __gmap_unshadow_pgt - remove all entries from a shadow page table
1326 * @sg: pointer to the shadow guest address space structure
1327 * @raddr: rmap address in the shadow guest address space
1330 * Called with the sg->guest_table_lock
1343 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
1344 * @sg: pointer to the shadow guest address space structure
1345 * @raddr: address in the shadow guest address space
1347 * Called with the sg->guest_table_lock
1359 gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1); in gmap_unshadow_pgt()
1360 sto = __pa(ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT)); in gmap_unshadow_pgt()
1367 list_del(&ptdesc->pt_list); in gmap_unshadow_pgt()
1372 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
1373 * @sg: pointer to the shadow guest address space structure
1374 * @raddr: rmap address in the shadow guest address space
1377 * Called with the sg->guest_table_lock
1395 list_del(&ptdesc->pt_list); in __gmap_unshadow_sgt()
1401 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
1402 * @sg: pointer to the shadow guest address space structure
1403 * @raddr: rmap address in the shadow guest address space
1405 * Called with the shadow->guest_table_lock
1414 r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */ in gmap_unshadow_sgt()
1417 gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1); in gmap_unshadow_sgt()
1418 r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT)); in gmap_unshadow_sgt()
1425 list_del(&page->lru); in gmap_unshadow_sgt()
1430 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
1431 * @sg: pointer to the shadow guest address space structure
1432 * @raddr: address in the shadow guest address space
1433 * @r3t: pointer to the start of a shadow region-3 table
1435 * Called with the sg->guest_table_lock
1453 list_del(&page->lru); in __gmap_unshadow_r3t()
1459 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1460 * @sg: pointer to the shadow guest address space structure
1461 * @raddr: rmap address in the shadow guest address space
1463 * Called with the sg->guest_table_lock
1472 r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */ in gmap_unshadow_r3t()
1475 gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1); in gmap_unshadow_r3t()
1476 r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT)); in gmap_unshadow_r3t()
1483 list_del(&page->lru); in gmap_unshadow_r3t()
1488 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1489 * @sg: pointer to the shadow guest address space structure
1490 * @raddr: rmap address in the shadow guest address space
1491 * @r2t: pointer to the start of a shadow region-2 table
1493 * Called with the sg->guest_table_lock
1511 list_del(&page->lru); in __gmap_unshadow_r2t()
1517 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1518 * @sg: pointer to the shadow guest address space structure
1519 * @raddr: rmap address in the shadow guest address space
1521 * Called with the sg->guest_table_lock
1530 r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */ in gmap_unshadow_r2t()
1533 gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1); in gmap_unshadow_r2t()
1534 r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT)); in gmap_unshadow_r2t()
1541 list_del(&page->lru); in gmap_unshadow_r2t()
1546 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1547 * @sg: pointer to the shadow guest address space structure
1548 * @raddr: rmap address in the shadow guest address space
1549 * @r1t: pointer to the start of a shadow region-1 table
1551 * Called with the shadow->guest_table_lock
1568 /* Clear entry and flush translation r1t -> r2t */ in __gmap_unshadow_r1t()
1573 list_del(&page->lru); in __gmap_unshadow_r1t()
1579 * gmap_unshadow - remove a shadow page table completely
1580 * @sg: pointer to the shadow guest address space structure
1582 * Called with sg->guest_table_lock
1589 if (sg->removed) in gmap_unshadow()
1591 sg->removed = 1; in gmap_unshadow()
1592 gmap_call_notifier(sg, 0, -1UL); in gmap_unshadow()
1594 table = __va(sg->asce & _ASCE_ORIGIN); in gmap_unshadow()
1595 switch (sg->asce & _ASCE_TYPE_MASK) { in gmap_unshadow()
1612 * gmap_find_shadow - find a specific asce in the list of shadow tables
1618 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1626 list_for_each_entry(sg, &parent->children, list) { in gmap_find_shadow()
1627 if (sg->orig_asce != asce || sg->edat_level != edat_level || in gmap_find_shadow()
1628 sg->removed) in gmap_find_shadow()
1630 if (!sg->initialized) in gmap_find_shadow()
1631 return ERR_PTR(-EAGAIN); in gmap_find_shadow()
1632 refcount_inc(&sg->ref_count); in gmap_find_shadow()
1639 * gmap_shadow_valid - check if a shadow guest address space matches the
1641 * @sg: pointer to the shadow guest address space structure
1652 if (sg->removed) in gmap_shadow_valid()
1654 return sg->orig_asce == asce && sg->edat_level == edat_level; in gmap_shadow_valid()
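
Usage sketch, following the vsie.c pattern of revalidating a cached shadow before reuse ("vsie_page" stands in for the caller's cache entry):

	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;	/* the cached shadow still matches, reuse it */
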
1659 * gmap_shadow - create/find a shadow guest address space
1665 * will be set to read-only and marked in the PGSTEs of the kvm process.
1669 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1670 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1680 BUG_ON(parent->mm->context.allow_gmap_hpage_1m); in gmap_shadow()
1682 spin_lock(&parent->shadow_lock); in gmap_shadow()
1684 spin_unlock(&parent->shadow_lock); in gmap_shadow()
1688 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11)); in gmap_shadow()
1690 limit = -1UL; in gmap_shadow()
1693 return ERR_PTR(-ENOMEM); in gmap_shadow()
1694 new->mm = parent->mm; in gmap_shadow()
1695 new->parent = gmap_get(parent); in gmap_shadow()
1696 new->private = parent->private; in gmap_shadow()
1697 new->orig_asce = asce; in gmap_shadow()
1698 new->edat_level = edat_level; in gmap_shadow()
1699 new->initialized = false; in gmap_shadow()
1700 spin_lock(&parent->shadow_lock); in gmap_shadow()
1704 spin_unlock(&parent->shadow_lock); in gmap_shadow()
1709 /* only allow one real-space gmap shadow */ in gmap_shadow()
1710 list_for_each_entry(sg, &parent->children, list) { in gmap_shadow()
1711 if (sg->orig_asce & _ASCE_REAL_SPACE) { in gmap_shadow()
1712 spin_lock(&sg->guest_table_lock); in gmap_shadow()
1714 spin_unlock(&sg->guest_table_lock); in gmap_shadow()
1715 list_del(&sg->list); in gmap_shadow()
1721 refcount_set(&new->ref_count, 2); in gmap_shadow()
1722 list_add(&new->list, &parent->children); in gmap_shadow()
1725 new->initialized = true; in gmap_shadow()
1726 spin_unlock(&parent->shadow_lock); in gmap_shadow()
1729 spin_unlock(&parent->shadow_lock); in gmap_shadow()
1731 mmap_read_lock(parent->mm); in gmap_shadow()
1735 mmap_read_unlock(parent->mm); in gmap_shadow()
1736 spin_lock(&parent->shadow_lock); in gmap_shadow()
1737 new->initialized = true; in gmap_shadow()
1739 list_del(&new->list); in gmap_shadow()
1743 spin_unlock(&parent->shadow_lock); in gmap_shadow()
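
Creation sketch (hypothetical locals): the asce comes from the nested guest's SIE control block, the edat level from the CPU model:

	struct gmap *sg;

	sg = gmap_shadow(vcpu->arch.gmap, guest_asce, edat_level);
	if (IS_ERR(sg))
		return PTR_ERR(sg);	/* -ENOMEM, -EAGAIN or -EFAULT */
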
1749 * gmap_shadow_r2t - create an empty shadow region 2 table
1750 * @sg: pointer to the shadow guest address space structure
1753 * @fake: r2t references contiguous guest memory block, not a r2t
1756 * four pages of the source table are made read-only in the parent gmap
1760 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1761 * shadow table structure is incomplete, -ENOMEM if out of memory and
1762 * -EFAULT if an address in the parent gmap could not be resolved.
1764 * Called with sg->mm->mmap_lock in read.
1779 return -ENOMEM; in gmap_shadow_r2t()
1780 page->index = r2t & _REGION_ENTRY_ORIGIN; in gmap_shadow_r2t()
1782 page->index |= GMAP_SHADOW_FAKE_TABLE; in gmap_shadow_r2t()
1785 spin_lock(&sg->guest_table_lock); in gmap_shadow_r2t()
1786 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */ in gmap_shadow_r2t()
1788 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_r2t()
1795 rc = -EAGAIN; /* Race with shadow */ in gmap_shadow_r2t()
1802 if (sg->edat_level >= 1) in gmap_shadow_r2t()
1804 list_add(&page->lru, &sg->crst_list); in gmap_shadow_r2t()
1808 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r2t()
1811 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r2t()
1812 /* Make r2t read-only in parent gmap page table */ in gmap_shadow_r2t()
1816 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; in gmap_shadow_r2t()
1818 spin_lock(&sg->guest_table_lock); in gmap_shadow_r2t()
1822 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_r2t()
1828 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r2t()
1831 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r2t()
1838 * gmap_shadow_r3t - create a shadow region 3 table
1839 * @sg: pointer to the shadow guest address space structure
1842 * @fake: r3t references contiguous guest memory block, not a r3t
1844 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1845 * shadow table structure is incomplete, -ENOMEM if out of memory and
1846 * -EFAULT if an address in the parent gmap could not be resolved.
1848 * Called with sg->mm->mmap_lock in read.
1863 return -ENOMEM; in gmap_shadow_r3t()
1864 page->index = r3t & _REGION_ENTRY_ORIGIN; in gmap_shadow_r3t()
1866 page->index |= GMAP_SHADOW_FAKE_TABLE; in gmap_shadow_r3t()
1869 spin_lock(&sg->guest_table_lock); in gmap_shadow_r3t()
1870 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */ in gmap_shadow_r3t()
1872 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_r3t()
1879 rc = -EAGAIN; /* Race with shadow */ in gmap_shadow_r3t()
1886 if (sg->edat_level >= 1) in gmap_shadow_r3t()
1888 list_add(&page->lru, &sg->crst_list); in gmap_shadow_r3t()
1892 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r3t()
1895 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r3t()
1896 /* Make r3t read-only in parent gmap page table */ in gmap_shadow_r3t()
1900 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; in gmap_shadow_r3t()
1902 spin_lock(&sg->guest_table_lock); in gmap_shadow_r3t()
1906 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_r3t()
1912 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r3t()
1915 spin_unlock(&sg->guest_table_lock); in gmap_shadow_r3t()
1922 * gmap_shadow_sgt - create a shadow segment table
1923 * @sg: pointer to the shadow guest address space structure
1926 * @fake: sgt references contiguous guest memory block, not a sgt
1928 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1929 * shadow table structure is incomplete, -ENOMEM if out of memory and
1930 * -EFAULT if an address in the parent gmap could not be resolved.
1932 * Called with sg->mm->mmap_lock in read.
1947 return -ENOMEM; in gmap_shadow_sgt()
1948 page->index = sgt & _REGION_ENTRY_ORIGIN; in gmap_shadow_sgt()
1950 page->index |= GMAP_SHADOW_FAKE_TABLE; in gmap_shadow_sgt()
1953 spin_lock(&sg->guest_table_lock); in gmap_shadow_sgt()
1954 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */ in gmap_shadow_sgt()
1956 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_sgt()
1963 rc = -EAGAIN; /* Race with shadow */ in gmap_shadow_sgt()
1970 if (sg->edat_level >= 1) in gmap_shadow_sgt()
1972 list_add(&page->lru, &sg->crst_list); in gmap_shadow_sgt()
1976 spin_unlock(&sg->guest_table_lock); in gmap_shadow_sgt()
1979 spin_unlock(&sg->guest_table_lock); in gmap_shadow_sgt()
1980 /* Make sgt read-only in parent gmap page table */ in gmap_shadow_sgt()
1984 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; in gmap_shadow_sgt()
1986 spin_lock(&sg->guest_table_lock); in gmap_shadow_sgt()
1990 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_sgt()
1996 spin_unlock(&sg->guest_table_lock); in gmap_shadow_sgt()
1999 spin_unlock(&sg->guest_table_lock); in gmap_shadow_sgt()
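
The three shadow table creators above (r2t, r3t, sgt) follow one pattern; a condensed outline (my summary, not code from the file):

	/*
	 * 1. allocate a crst page and stash the guest table origin, plus
	 *    GMAP_SHADOW_FAKE_TABLE where applicable, in page->index;
	 * 2. under guest_table_lock, hook the page into the parent entry
	 *    unless a racing shadow/unshadow got there first (-EAGAIN);
	 * 3. drop the lock and make the source table read-only in the
	 *    parent gmap via gmap_protect_rmap();
	 * 4. retake the lock and only then mark the entry valid, again
	 *    checking for races.
	 */
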
2006 * gmap_shadow_pgt_lookup - find a shadow page table
2007 * @sg: pointer to the shadow guest address space structure
2011 * @fake: pgt references contiguous guest memory block, not a pgtable
2013 * Returns 0 if the shadow page table was found and -EAGAIN if the page
2016 * Called with sg->mm->mmap_lock in read.
2027 spin_lock(&sg->guest_table_lock); in gmap_shadow_pgt_lookup()
2032 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE; in gmap_shadow_pgt_lookup()
2034 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE); in gmap_shadow_pgt_lookup()
2037 rc = -EAGAIN; in gmap_shadow_pgt_lookup()
2039 spin_unlock(&sg->guest_table_lock); in gmap_shadow_pgt_lookup()
2046 * gmap_shadow_pgt - instantiate a shadow page table
2047 * @sg: pointer to the shadow guest address space structure
2050 * @fake: pgt references contiguous guest memory block, not a pgtable
2052 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2053 * shadow table structure is incomplete, -ENOMEM if out of memory,
2054 * -EFAULT if an address in the parent gmap could not be resolved and
2056 * Called with gmap->mm->mmap_lock in read
2069 ptdesc = page_table_alloc_pgste(sg->mm); in gmap_shadow_pgt()
2071 return -ENOMEM; in gmap_shadow_pgt()
2072 ptdesc->pt_index = pgt & _SEGMENT_ENTRY_ORIGIN; in gmap_shadow_pgt()
2074 ptdesc->pt_index |= GMAP_SHADOW_FAKE_TABLE; in gmap_shadow_pgt()
2077 spin_lock(&sg->guest_table_lock); in gmap_shadow_pgt()
2080 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_pgt()
2087 rc = -EAGAIN; /* Race with shadow */ in gmap_shadow_pgt()
2093 list_add(&ptdesc->pt_list, &sg->pt_list); in gmap_shadow_pgt()
2097 spin_unlock(&sg->guest_table_lock); in gmap_shadow_pgt()
2100 spin_unlock(&sg->guest_table_lock); in gmap_shadow_pgt()
2101 /* Make pgt read-only in parent gmap page table (not the pgste) */ in gmap_shadow_pgt()
2105 spin_lock(&sg->guest_table_lock); in gmap_shadow_pgt()
2109 rc = -EAGAIN; /* Race with unshadow */ in gmap_shadow_pgt()
2115 spin_unlock(&sg->guest_table_lock); in gmap_shadow_pgt()
2118 spin_unlock(&sg->guest_table_lock); in gmap_shadow_pgt()
2126 * gmap_shadow_page - create a shadow page mapping
2127 * @sg: pointer to the shadow guest address space structure
2131 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2132 * shadow table structure is incomplete, -ENOMEM if out of memory and
2133 * -EFAULT if an address in the parent gmap could not be resolved.
2135 * Called with sg->mm->mmap_lock in read.
2148 parent = sg->parent; in gmap_shadow_page()
2153 return -ENOMEM; in gmap_shadow_page()
2154 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE; in gmap_shadow_page()
2166 rc = -EAGAIN; in gmap_shadow_page()
2169 spin_lock(&sg->guest_table_lock); in gmap_shadow_page()
2173 spin_unlock(&sg->guest_table_lock); in gmap_shadow_page()
2178 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte); in gmap_shadow_page()
2186 spin_unlock(&sg->guest_table_lock); in gmap_shadow_page()
2201 * gmap_shadow_notify - handle notifications for shadow gmap
2203 * Called with sg->parent->shadow_lock.
2209 unsigned long start, end, bits, raddr; in gmap_shadow_notify() local
2213 spin_lock(&sg->guest_table_lock); in gmap_shadow_notify()
2214 if (sg->removed) { in gmap_shadow_notify()
2215 spin_unlock(&sg->guest_table_lock); in gmap_shadow_notify()
2219 start = sg->orig_asce & _ASCE_ORIGIN; in gmap_shadow_notify()
2220 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE; in gmap_shadow_notify()
2221 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start && in gmap_shadow_notify()
2225 spin_unlock(&sg->guest_table_lock); in gmap_shadow_notify()
2226 list_del(&sg->list); in gmap_shadow_notify()
2231 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT); in gmap_shadow_notify()
2233 bits = rmap->raddr & _SHADOW_RMAP_MASK; in gmap_shadow_notify()
2234 raddr = rmap->raddr ^ bits; in gmap_shadow_notify()
2235 switch (bits) { in gmap_shadow_notify()
2254 spin_unlock(&sg->guest_table_lock); in gmap_shadow_notify()
2258 * ptep_notify - call all invalidation callbacks for a specific pte.
2262 * @bits: bits from the pgste that caused the notify call
2268 pte_t *pte, unsigned long bits) in ptep_notify() argument
2277 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { in ptep_notify()
2278 spin_lock(&gmap->guest_table_lock); in ptep_notify()
2279 table = radix_tree_lookup(&gmap->host_to_guest, in ptep_notify()
2283 spin_unlock(&gmap->guest_table_lock); in ptep_notify()
2287 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) { in ptep_notify()
2288 spin_lock(&gmap->shadow_lock); in ptep_notify()
2290 &gmap->children, list) in ptep_notify()
2292 spin_unlock(&gmap->shadow_lock); in ptep_notify()
2294 if (bits & PGSTE_IN_BIT) in ptep_notify()
2295 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1); in ptep_notify()
2305 gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1); in pmdp_notify_gmap()
2309 * gmap_pmdp_xchg - exchange a gmap pmd with another
2310 * @gmap: pointer to the guest address space structure
2313 * @gaddr: the affected guest address
2325 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce, in gmap_pmdp_xchg()
2342 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { in gmap_pmdp_clear()
2343 spin_lock(&gmap->guest_table_lock); in gmap_pmdp_clear()
2344 pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest, in gmap_pmdp_clear()
2355 spin_unlock(&gmap->guest_table_lock); in gmap_pmdp_clear()
2361 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
2373 * gmap_pmdp_csp - csp all affected guest pmd entries
2384 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
2395 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { in gmap_pmdp_idte_local()
2396 spin_lock(&gmap->guest_table_lock); in gmap_pmdp_idte_local()
2397 entry = radix_tree_delete(&gmap->host_to_guest, in gmap_pmdp_idte_local()
2407 gmap->asce, IDTE_LOCAL); in gmap_pmdp_idte_local()
2412 spin_unlock(&gmap->guest_table_lock); in gmap_pmdp_idte_local()
2419 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
2430 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { in gmap_pmdp_idte_global()
2431 spin_lock(&gmap->guest_table_lock); in gmap_pmdp_idte_global()
2432 entry = radix_tree_delete(&gmap->host_to_guest, in gmap_pmdp_idte_global()
2442 gmap->asce, IDTE_GLOBAL); in gmap_pmdp_idte_global()
2449 spin_unlock(&gmap->guest_table_lock); in gmap_pmdp_idte_global()
2456 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
2457 * @gmap: pointer to guest address space
2459 * @gaddr: virtual address in the guest address space
2482 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
2483 * @gmap: pointer to guest address space
2485 * @gaddr: virtual address in the guest address space
2508 ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl); in gmap_sync_dirty_log_pmd()
2511 if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep)) in gmap_sync_dirty_log_pmd()
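
Usage sketch, following kvm-s390's dirty-log sync: the four-longword bitmap covers the 256 4 KB pages of one 1 MB segment ("g", "gaddr" and "vmaddr" are hypothetical locals):

	unsigned long bitmap[4] = {};

	gmap_sync_dirty_log_pmd(g, bitmap, gaddr, vmaddr);
	/* ... transfer set bits into the memslot's dirty bitmap ... */
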
2524 struct vm_area_struct *vma = walk->vma; in thp_split_walk_pmd_entry()
2544 mm->def_flags |= VM_NOHUGEPAGE; in thp_split_mm()
2557 struct mm_struct *mm = current->mm; in s390_enable_sie()
2564 return -EINVAL; in s390_enable_sie()
2566 mm->context.has_pgste = 1; in s390_enable_sie()
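
Usage sketch: kvm-s390 calls this once per VM at creation time, before the first gmap exists, to mark the mm pgste-capable and split any existing THP mappings:

	rc = s390_enable_sie();
	if (rc)
		return rc;	/* -EINVAL: the mm cannot use pgste page tables */
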
2577 unsigned long *found_addr = walk->private; in find_zeropage_pte_entry()
2587 if (!is_cow_mapping(walk->vma->vm_flags)) in find_zeropage_pte_entry()
2588 return -EFAULT; in find_zeropage_pte_entry()
2606 * This must be called after mm->context.allow_cow_sharing was
2613 * mm->context.allow_cow_sharing is set to 0.
2629 if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma)) in __s390_unshare_zeropages()
2631 addr = vma->vm_start; in __s390_unshare_zeropages()
2634 rc = walk_page_range_vma(vma, addr, vma->vm_end, in __s390_unshare_zeropages()
2646 return -ENOMEM; in __s390_unshare_zeropages()
2668 if (!mm->context.allow_cow_sharing) in __s390_disable_cow_sharing()
2671 mm->context.allow_cow_sharing = 0; in __s390_disable_cow_sharing()
2678 * from re-enabling it. in __s390_disable_cow_sharing()
2683 mm->context.allow_cow_sharing = 1; in __s390_disable_cow_sharing()
2688 * Disable most COW-sharing of memory pages for the whole process:
2692 * Note that we currently don't bother with COW-shared pages that are shared
2699 mmap_write_lock(current->mm); in s390_disable_cow_sharing()
2700 rc = __s390_disable_cow_sharing(current->mm); in s390_disable_cow_sharing()
2701 mmap_write_unlock(current->mm); in s390_disable_cow_sharing()
2714 ptep_zap_key(walk->mm, addr, pte); in __s390_enable_skey_pte()
2741 * between actual guest memory and the process executable or in __s390_enable_skey_hugetlb()
2751 set_bit(PG_arch_1, &folio->flags); in __s390_enable_skey_hugetlb()
2765 struct mm_struct *mm = current->mm; in s390_enable_skey()
2772 mm->context.uses_skeys = 1; in s390_enable_skey()
2775 mm->context.uses_skeys = 0; in s390_enable_skey()
2792 ptep_zap_unused(walk->mm, addr, pte, 1); in __s390_reset_cmma()
2820 struct reset_walk_state *p = walk->private; in s390_gather_pages()
2826 p->pfns[p->count] = phys_to_pfn(pte_val(pte)); in s390_gather_pages()
2827 p->next = next; in s390_gather_pages()
2828 p->count++; in s390_gather_pages()
2830 return p->count >= GATHER_GET_PAGES; in s390_gather_pages()
2859 * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
2870 * Return: 0 on success, -EINTR if the function stopped before completing
2886 return -EINTR; in __s390_uv_destroy_range()
2893 * s390_unlist_old_asce - Remove the topmost level of page tables from the
2913 old = virt_to_page(gmap->table); in s390_unlist_old_asce()
2914 spin_lock(&gmap->guest_table_lock); in s390_unlist_old_asce()
2915 list_del(&old->lru); in s390_unlist_old_asce()
2930 INIT_LIST_HEAD(&old->lru); in s390_unlist_old_asce()
2931 spin_unlock(&gmap->guest_table_lock); in s390_unlist_old_asce()
2936 * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
2939 * If the ASCE is a SEGMENT type then this function will return -EINVAL,
2941 * to the wrong pages, causing use-after-free and memory corruption.
2957 if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT) in s390_replace_asce()
2958 return -EINVAL; in s390_replace_asce()
2962 return -ENOMEM; in s390_replace_asce()
2963 page->index = 0; in s390_replace_asce()
2965 memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT)); in s390_replace_asce()
2972 spin_lock(&gmap->guest_table_lock); in s390_replace_asce()
2973 list_add(&page->lru, &gmap->crst_list); in s390_replace_asce()
2974 spin_unlock(&gmap->guest_table_lock); in s390_replace_asce()
2976 /* Set new table origin while preserving existing ASCE control bits */ in s390_replace_asce()
2977 asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table); in s390_replace_asce()
2978 WRITE_ONCE(gmap->asce, asce); in s390_replace_asce()
2979 WRITE_ONCE(gmap->mm->context.gmap_asce, asce); in s390_replace_asce()
2980 WRITE_ONCE(gmap->table, table); in s390_replace_asce()
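
Usage sketch: the protected-VM teardown path in kvm-s390 swaps in the copy when the old top-level table may still be referenced, for example by the Ultravisor:

	rc = s390_replace_asce(kvm->arch.gmap);
	if (rc)
		return rc;	/* -ENOMEM, or -EINVAL for a segment-type ASCE */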