Lines matching refs:iter in the KVM TDP MMU (an illustrative usage sketch follows the listing)

213 				  struct tdp_iter *iter)  in tdp_mmu_init_child_sp()  argument
218 parent_sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_init_child_sp()
223 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp()
533 static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter, in __tdp_mmu_set_spte_atomic() argument
536 u64 *sptep = rcu_dereference(iter->sptep); in __tdp_mmu_set_spte_atomic()
544 WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte)); in __tdp_mmu_set_spte_atomic()
553 if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte)) in __tdp_mmu_set_spte_atomic()
577 struct tdp_iter *iter, in tdp_mmu_set_spte_atomic() argument
584 ret = __tdp_mmu_set_spte_atomic(iter, new_spte); in tdp_mmu_set_spte_atomic()
588 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_set_spte_atomic()
589 new_spte, iter->level, true); in tdp_mmu_set_spte_atomic()
595 struct tdp_iter *iter) in tdp_mmu_zap_spte_atomic() argument
609 ret = __tdp_mmu_set_spte_atomic(iter, FROZEN_SPTE); in tdp_mmu_zap_spte_atomic()
613 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level); in tdp_mmu_zap_spte_atomic()
621 __kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE); in tdp_mmu_zap_spte_atomic()
629 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_zap_spte_atomic()
630 SHADOW_NONPRESENT_VALUE, iter->level, true); in tdp_mmu_zap_spte_atomic()
669 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_iter_set_spte() argument
672 WARN_ON_ONCE(iter->yielded); in tdp_mmu_iter_set_spte()
673 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, in tdp_mmu_iter_set_spte()
674 iter->old_spte, new_spte, in tdp_mmu_iter_set_spte()
675 iter->gfn, iter->level); in tdp_mmu_iter_set_spte()
706 struct tdp_iter *iter, in tdp_mmu_iter_cond_resched() argument
709 WARN_ON_ONCE(iter->yielded); in tdp_mmu_iter_cond_resched()
712 if (iter->next_last_level_gfn == iter->yielded_gfn) in tdp_mmu_iter_cond_resched()
728 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn); in tdp_mmu_iter_cond_resched()
730 iter->yielded = true; in tdp_mmu_iter_cond_resched()
733 return iter->yielded; in tdp_mmu_iter_cond_resched()
750 struct tdp_iter iter; in __tdp_mmu_zap_root() local
755 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { in __tdp_mmu_zap_root()
757 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in __tdp_mmu_zap_root()
760 if (!is_shadow_present_pte(iter.old_spte)) in __tdp_mmu_zap_root()
763 if (iter.level > zap_level) in __tdp_mmu_zap_root()
767 tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE); in __tdp_mmu_zap_root()
768 else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE)) in __tdp_mmu_zap_root()
851 struct tdp_iter iter; in tdp_mmu_zap_leafs() local
859 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) { in tdp_mmu_zap_leafs()
861 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { in tdp_mmu_zap_leafs()
866 if (!is_shadow_present_pte(iter.old_spte) || in tdp_mmu_zap_leafs()
867 !is_last_spte(iter.old_spte, iter.level)) in tdp_mmu_zap_leafs()
870 tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE); in tdp_mmu_zap_leafs()
1019 struct tdp_iter *iter) in tdp_mmu_map_handle_target_level() argument
1021 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_map_handle_target_level()
1030 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); in tdp_mmu_map_handle_target_level()
1032 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, in tdp_mmu_map_handle_target_level()
1033 fault->pfn, iter->old_spte, fault->prefetch, true, in tdp_mmu_map_handle_target_level()
1036 if (new_spte == iter->old_spte) in tdp_mmu_map_handle_target_level()
1038 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) in tdp_mmu_map_handle_target_level()
1040 else if (is_shadow_present_pte(iter->old_spte) && in tdp_mmu_map_handle_target_level()
1041 !is_last_spte(iter->old_spte, iter->level)) in tdp_mmu_map_handle_target_level()
1042 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level); in tdp_mmu_map_handle_target_level()
1055 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, in tdp_mmu_map_handle_target_level()
1059 trace_kvm_mmu_set_spte(iter->level, iter->gfn, in tdp_mmu_map_handle_target_level()
1060 rcu_dereference(iter->sptep)); in tdp_mmu_map_handle_target_level()
1078 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_link_sp() argument
1085 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); in tdp_mmu_link_sp()
1089 tdp_mmu_iter_set_spte(kvm, iter, spte); in tdp_mmu_link_sp()
1097 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1108 struct tdp_iter iter; in kvm_tdp_mmu_map() local
1118 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { in kvm_tdp_mmu_map()
1122 disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); in kvm_tdp_mmu_map()
1128 if (is_frozen_spte(iter.old_spte)) in kvm_tdp_mmu_map()
1131 if (iter.level == fault->goal_level) in kvm_tdp_mmu_map()
1135 if (is_shadow_present_pte(iter.old_spte) && in kvm_tdp_mmu_map()
1136 !is_large_pte(iter.old_spte)) in kvm_tdp_mmu_map()
1144 tdp_mmu_init_child_sp(sp, &iter); in kvm_tdp_mmu_map()
1148 if (is_shadow_present_pte(iter.old_spte)) in kvm_tdp_mmu_map()
1149 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1151 r = tdp_mmu_link_sp(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1163 fault->req_level >= iter.level) { in kvm_tdp_mmu_map()
1175 WARN_ON_ONCE(iter.level == fault->goal_level); in kvm_tdp_mmu_map()
1179 ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); in kvm_tdp_mmu_map()
1198 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1206 struct tdp_iter iter; in kvm_tdp_mmu_handle_gfn() local
1216 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) in kvm_tdp_mmu_handle_gfn()
1217 ret |= handler(kvm, &iter, range); in kvm_tdp_mmu_handle_gfn()
1233 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, in age_gfn_range() argument
1239 if (!is_accessed_spte(iter->old_spte)) in age_gfn_range()
1242 if (spte_ad_enabled(iter->old_spte)) { in age_gfn_range()
1243 iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep, in age_gfn_range()
1244 iter->old_spte, in age_gfn_range()
1246 iter->level); in age_gfn_range()
1247 new_spte = iter->old_spte & ~shadow_accessed_mask; in age_gfn_range()
1253 if (is_writable_pte(iter->old_spte)) in age_gfn_range()
1254 kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte)); in age_gfn_range()
1256 new_spte = mark_spte_for_access_track(iter->old_spte); in age_gfn_range()
1257 iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep, in age_gfn_range()
1258 iter->old_spte, new_spte, in age_gfn_range()
1259 iter->level); in age_gfn_range()
1262 trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level, in age_gfn_range()
1263 iter->old_spte, new_spte); in age_gfn_range()
1272 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, in test_age_gfn() argument
1275 return is_accessed_spte(iter->old_spte); in test_age_gfn()
1291 struct tdp_iter iter; in wrprot_gfn_range() local
1299 for_each_tdp_pte_min_level(iter, root, min_level, start, end) { in wrprot_gfn_range()
1301 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in wrprot_gfn_range()
1304 if (!is_shadow_present_pte(iter.old_spte) || in wrprot_gfn_range()
1305 !is_last_spte(iter.old_spte, iter.level) || in wrprot_gfn_range()
1306 !(iter.old_spte & PT_WRITABLE_MASK)) in wrprot_gfn_range()
1309 new_spte = iter.old_spte & ~PT_WRITABLE_MASK; in wrprot_gfn_range()
1311 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) in wrprot_gfn_range()
1359 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_split_huge_page() argument
1362 const u64 huge_spte = iter->old_spte; in tdp_mmu_split_huge_page()
1363 const int level = iter->level; in tdp_mmu_split_huge_page()
1381 ret = tdp_mmu_link_sp(kvm, iter, sp, shared); in tdp_mmu_split_huge_page()
1393 trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret); in tdp_mmu_split_huge_page()
1403 struct tdp_iter iter; in tdp_mmu_split_huge_pages_root() local
1418 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { in tdp_mmu_split_huge_pages_root()
1420 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in tdp_mmu_split_huge_pages_root()
1423 if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte)) in tdp_mmu_split_huge_pages_root()
1442 trace_kvm_mmu_split_huge_page(iter.gfn, in tdp_mmu_split_huge_pages_root()
1443 iter.old_spte, in tdp_mmu_split_huge_pages_root()
1444 iter.level, -ENOMEM); in tdp_mmu_split_huge_pages_root()
1450 iter.yielded = true; in tdp_mmu_split_huge_pages_root()
1454 tdp_mmu_init_child_sp(sp, &iter); in tdp_mmu_split_huge_pages_root()
1456 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) in tdp_mmu_split_huge_pages_root()
1512 struct tdp_iter iter; in clear_dirty_gfn_range() local
1517 tdp_root_for_each_pte(iter, root, start, end) { in clear_dirty_gfn_range()
1519 if (!is_shadow_present_pte(iter.old_spte) || in clear_dirty_gfn_range()
1520 !is_last_spte(iter.old_spte, iter.level)) in clear_dirty_gfn_range()
1523 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in clear_dirty_gfn_range()
1527 spte_ad_need_write_protect(iter.old_spte)); in clear_dirty_gfn_range()
1529 if (!(iter.old_spte & dbit)) in clear_dirty_gfn_range()
1532 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit)) in clear_dirty_gfn_range()
1566 struct tdp_iter iter; in clear_dirty_pt_masked() local
1572 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), in clear_dirty_pt_masked()
1578 spte_ad_need_write_protect(iter.old_spte)); in clear_dirty_pt_masked()
1580 if (iter.level > PG_LEVEL_4K || in clear_dirty_pt_masked()
1581 !(mask & (1UL << (iter.gfn - gfn)))) in clear_dirty_pt_masked()
1584 mask &= ~(1UL << (iter.gfn - gfn)); in clear_dirty_pt_masked()
1586 if (!(iter.old_spte & dbit)) in clear_dirty_pt_masked()
1589 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep, in clear_dirty_pt_masked()
1590 iter.old_spte, dbit, in clear_dirty_pt_masked()
1591 iter.level); in clear_dirty_pt_masked()
1593 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level, in clear_dirty_pt_masked()
1594 iter.old_spte, in clear_dirty_pt_masked()
1595 iter.old_spte & ~dbit); in clear_dirty_pt_masked()
1596 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte)); in clear_dirty_pt_masked()
1624 struct tdp_iter iter; in zap_collapsible_spte_range() local
1629 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) { in zap_collapsible_spte_range()
1631 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in zap_collapsible_spte_range()
1634 if (iter.level > KVM_MAX_HUGEPAGE_LEVEL || in zap_collapsible_spte_range()
1635 !is_shadow_present_pte(iter.old_spte)) in zap_collapsible_spte_range()
1643 if (is_last_spte(iter.old_spte, iter.level)) in zap_collapsible_spte_range()
1653 if (iter.gfn < start || iter.gfn >= end) in zap_collapsible_spte_range()
1657 iter.gfn, PG_LEVEL_NUM); in zap_collapsible_spte_range()
1658 if (max_mapping_level < iter.level) in zap_collapsible_spte_range()
1662 if (tdp_mmu_zap_spte_atomic(kvm, &iter)) in zap_collapsible_spte_range()
1691 struct tdp_iter iter; in write_protect_gfn() local
1699 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { in write_protect_gfn()
1700 if (!is_shadow_present_pte(iter.old_spte) || in write_protect_gfn()
1701 !is_last_spte(iter.old_spte, iter.level)) in write_protect_gfn()
1704 new_spte = iter.old_spte & in write_protect_gfn()
1707 if (new_spte == iter.old_spte) in write_protect_gfn()
1710 tdp_mmu_iter_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1747 struct tdp_iter iter; in kvm_tdp_mmu_get_walk() local
1754 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_get_walk()
1755 leaf = iter.level; in kvm_tdp_mmu_get_walk()
1756 sptes[leaf] = iter.old_spte; in kvm_tdp_mmu_get_walk()
1776 struct tdp_iter iter; in kvm_tdp_mmu_fast_pf_get_last_sptep() local
1780 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_fast_pf_get_last_sptep()
1781 *spte = iter.old_spte; in kvm_tdp_mmu_fast_pf_get_last_sptep()
1782 sptep = iter.sptep; in kvm_tdp_mmu_fast_pf_get_last_sptep()
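
The matches above repeat one pattern: declare a struct tdp_iter on the stack, drive it with one of the for_each_tdp_pte*() macros, skip SPTEs that need no change, update the rest through the tdp_mmu_set_spte*() helpers, and yield via tdp_mmu_iter_cond_resched() when the walk should be interrupted. The sketch below is a condensed restatement of that pattern as it appears in the wrprot_gfn_range() and clear_dirty_gfn_range() matches; the function name example_wrprot_walk() is made up for illustration, and the code assumes the TDP MMU internals referenced in the listing, so it is not meant to build outside that file.

/*
 * Illustrative sketch only, condensed from the wrprot_gfn_range() matches
 * above. Assumes the caller holds kvm->mmu_lock for read and that the TDP
 * MMU internals (struct tdp_iter, for_each_tdp_pte_min_level(),
 * tdp_mmu_iter_cond_resched(), tdp_mmu_set_spte_atomic()) are available.
 */
static void example_wrprot_walk(struct kvm *kvm, struct kvm_mmu_page *root,
				gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;

	/* SPTEs are walked and modified under RCU protection. */
	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
retry:
		/* Yield the walk if mmu_lock is contended or a resched is due. */
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		/* Only present, writable leaf SPTEs need to change. */
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		/*
		 * The cmpxchg-based setter fails if the SPTE changed under us;
		 * it refreshes iter.old_spte, so simply retry this entry.
		 */
		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
			goto retry;
	}

	rcu_read_unlock();
}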