Lines Matching refs:kvm

233 struct kvm *kvm; member
248 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_init() argument
263 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
264 list_add(&p->list, &kvm->arch.uvmem_pfns); in kvmppc_uvmem_slot_init()
265 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
273 void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_free() argument
277 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
278 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { in kvmppc_uvmem_slot_free()
286 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
294 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_mark_gfn()
309 unsigned long uvmem_pfn, struct kvm *kvm) in kvmppc_gfn_secure_uvmem_pfn() argument
311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared()
327 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_remove() argument
329 kvmppc_mark_gfn(gfn, kvm, 0, 0); in kvmppc_gfn_remove()
333 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, in kvmppc_gfn_is_uvmem_pfn() argument
338 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_gfn_is_uvmem_pfn()
362 struct kvm *kvm, unsigned long *gfn) in kvmppc_next_nontransitioned_gfn() argument
368 list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) in kvmppc_next_nontransitioned_gfn()
391 static int kvmppc_memslot_page_merge(struct kvm *kvm, in kvmppc_memslot_page_merge() argument
395 unsigned long end, start = gfn_to_hva(kvm, gfn); in kvmppc_memslot_page_merge()
406 mmap_write_lock(kvm->mm); in kvmppc_memslot_page_merge()
408 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_memslot_page_merge()
426 mmap_write_unlock(kvm->mm); in kvmppc_memslot_page_merge()
430 static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm, in __kvmppc_uvmem_memslot_delete() argument
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
434 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_delete()
435 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_delete()
438 static int __kvmppc_uvmem_memslot_create(struct kvm *kvm, in __kvmppc_uvmem_memslot_create() argument
443 if (kvmppc_memslot_page_merge(kvm, memslot, false)) in __kvmppc_uvmem_memslot_create()
446 if (kvmppc_uvmem_slot_init(kvm, memslot)) in __kvmppc_uvmem_memslot_create()
449 ret = uv_register_mem_slot(kvm->arch.lpid, in __kvmppc_uvmem_memslot_create()
459 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_create()
461 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_create()
465 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) in kvmppc_h_svm_init_start() argument
472 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; in kvmppc_h_svm_init_start()
478 if (!kvm_is_radix(kvm)) in kvmppc_h_svm_init_start()
482 if (!kvm->arch.svm_enabled) in kvmppc_h_svm_init_start()
485 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_start()
488 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_start()
490 ret = __kvmppc_uvmem_memslot_create(kvm, memslot); in kvmppc_h_svm_init_start()
496 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_start()
500 __kvmppc_uvmem_memslot_delete(kvm, memslot); in kvmppc_h_svm_init_start()
504 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_start()
516 struct kvm *kvm, unsigned long gpa, struct page *fault_page) in __kvmppc_svm_page_out() argument
536 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) in __kvmppc_svm_page_out()
568 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, in __kvmppc_svm_page_out()
589 struct kvm *kvm, unsigned long gpa, in kvmppc_svm_page_out() argument
594 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
595 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, in kvmppc_svm_page_out()
597 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
611 struct kvm *kvm, bool skip_page_out) in kvmppc_uvmem_drop_pages() argument
620 mmap_read_lock(kvm->mm); in kvmppc_uvmem_drop_pages()
629 vma = vma_lookup(kvm->mm, addr); in kvmppc_uvmem_drop_pages()
636 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
638 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_uvmem_drop_pages()
645 PAGE_SHIFT, kvm, pvt->gpa, NULL)) in kvmppc_uvmem_drop_pages()
650 kvmppc_gfn_remove(gfn, kvm); in kvmppc_uvmem_drop_pages()
653 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
656 mmap_read_unlock(kvm->mm); in kvmppc_uvmem_drop_pages()
659 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) in kvmppc_h_svm_init_abort() argument
668 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_abort()
671 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_h_svm_init_abort()
674 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_abort()
676 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) in kvmppc_h_svm_init_abort()
677 kvmppc_uvmem_drop_pages(memslot, kvm, false); in kvmppc_h_svm_init_abort()
679 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_abort()
681 kvm->arch.secure_guest = 0; in kvmppc_h_svm_init_abort()
682 uv_svm_terminate(kvm->arch.lpid); in kvmppc_h_svm_init_abort()
695 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) in kvmppc_uvmem_get_page() argument
719 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); in kvmppc_uvmem_get_page()
722 pvt->kvm = kvm; in kvmppc_uvmem_get_page()
742 unsigned long end, unsigned long gpa, struct kvm *kvm, in kvmppc_svm_page_in() argument
770 dpage = kvmppc_uvmem_get_page(gpa, kvm); in kvmppc_svm_page_in()
780 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, in kvmppc_svm_page_in()
794 static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm, in kvmppc_uv_migrate_mem_slot() argument
802 mmap_read_lock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
803 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
804 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) { in kvmppc_uv_migrate_mem_slot()
806 start = gfn_to_hva(kvm, gfn); in kvmppc_uv_migrate_mem_slot()
811 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_uv_migrate_mem_slot()
816 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false); in kvmppc_uv_migrate_mem_slot()
825 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
826 mmap_read_unlock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
830 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) in kvmppc_h_svm_init_done() argument
837 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_done()
841 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_done()
842 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_done()
844 ret = kvmppc_uv_migrate_mem_slot(kvm, memslot); in kvmppc_h_svm_init_done()
860 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; in kvmppc_h_svm_init_done()
861 pr_info("LPID %lld went secure\n", kvm->arch.lpid); in kvmppc_h_svm_init_done()
864 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_done()
877 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, in kvmppc_share_page() argument
889 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_share_page()
890 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
891 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
903 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
904 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_share_page()
908 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
909 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
918 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, in kvmppc_share_page()
920 kvmppc_gfn_shared(gfn, kvm); in kvmppc_share_page()
924 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
926 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_share_page()
936 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_in() argument
946 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_in()
956 return kvmppc_share_page(kvm, gpa, page_shift); in kvmppc_h_svm_page_in()
959 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_in()
960 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_in()
962 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_in()
966 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
968 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_h_svm_page_in()
972 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_in()
976 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, in kvmppc_h_svm_page_in()
983 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
985 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_in()
986 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_in()
1005 pvt->kvm, pvt->gpa, vmf->page)) in kvmppc_uvmem_migrate_to_ram()
1031 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1033 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1046 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_out() argument
1055 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_out()
1065 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_out()
1066 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_out()
1067 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_out()
1072 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_out()
1076 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL)) in kvmppc_h_svm_page_out()
1079 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_out()
1080 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_out()
1084 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) in kvmppc_send_page_to_uv() argument
1089 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_send_page_to_uv()
1093 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1094 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_send_page_to_uv()
1097 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, in kvmppc_send_page_to_uv()
1101 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1105 int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new) in kvmppc_uvmem_memslot_create() argument
1107 int ret = __kvmppc_uvmem_memslot_create(kvm, new); in kvmppc_uvmem_memslot_create()
1110 ret = kvmppc_uv_migrate_mem_slot(kvm, new); in kvmppc_uvmem_memslot_create()
1115 void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old) in kvmppc_uvmem_memslot_delete() argument
1117 __kvmppc_uvmem_memslot_delete(kvm, old); in kvmppc_uvmem_memslot_delete()
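
Taken together, the hits trace one bookkeeping pattern in the POWER secure-guest (Ultravisor) paging code: each memory slot gets a tracking node on kvm->arch.uvmem_pfns, every walk of that list is serialized by kvm->arch.uvmem_lock, and per-GFN state (KVMPPC_GFN_UVMEM_PFN, KVMPPC_GFN_MEM_PFN, KVMPPC_GFN_SHARED) is written through kvmppc_mark_gfn() and read through kvmppc_gfn_is_uvmem_pfn(). The user-space sketch below mirrors that pattern only; the struct layout, the state bits, and the helper names (uvmem_slot, mark_gfn, gfn_is_uvmem_pfn) are illustrative assumptions, not the kernel's actual definitions, and a 64-bit unsigned long is assumed.

/*
 * Minimal sketch of the bookkeeping pattern the matches suggest: one
 * tracking node per memory slot on a list guarded by a single mutex
 * (standing in for kvm->arch.uvmem_lock), plus one state word per guest
 * frame.  Illustrative only; not the kernel's layout.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Per-gfn states mirroring KVMPPC_GFN_UVMEM_PFN / _MEM_PFN / _SHARED. */
#define GFN_UVMEM_PFN  (1UL << 63)   /* backed by a device (uvmem) page */
#define GFN_MEM_PFN    (1UL << 62)   /* backed by secure memory         */
#define GFN_SHARED     (1UL << 61)   /* shared with the hypervisor      */
#define GFN_FLAG_MASK  (GFN_UVMEM_PFN | GFN_MEM_PFN | GFN_SHARED)

struct uvmem_slot {                   /* stands in for kvmppc_uvmem_slot */
	struct uvmem_slot *next;
	unsigned long base_gfn;
	unsigned long nr_gfns;
	unsigned long *state;         /* one word per gfn in the slot */
};

static struct uvmem_slot *slots;      /* stands in for kvm->arch.uvmem_pfns */
static pthread_mutex_t uvmem_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like kvmppc_uvmem_slot_init(): allocate tracking state, add it to the list. */
static int uvmem_slot_init(unsigned long base_gfn, unsigned long nr_gfns)
{
	struct uvmem_slot *p = calloc(1, sizeof(*p));

	if (!p || !(p->state = calloc(nr_gfns, sizeof(*p->state)))) {
		free(p);
		return -1;
	}
	p->base_gfn = base_gfn;
	p->nr_gfns = nr_gfns;

	pthread_mutex_lock(&uvmem_lock);
	p->next = slots;
	slots = p;
	pthread_mutex_unlock(&uvmem_lock);
	return 0;
}

/* Like kvmppc_mark_gfn(): record a gfn's state; caller holds uvmem_lock. */
static void mark_gfn(unsigned long gfn, unsigned long flag, unsigned long pfn)
{
	struct uvmem_slot *p;

	for (p = slots; p; p = p->next) {
		if (gfn >= p->base_gfn && gfn < p->base_gfn + p->nr_gfns) {
			p->state[gfn - p->base_gfn] = flag | pfn;
			return;
		}
	}
}

/* Like kvmppc_gfn_is_uvmem_pfn(): query a gfn; caller holds uvmem_lock. */
static bool gfn_is_uvmem_pfn(unsigned long gfn, unsigned long *pfn)
{
	struct uvmem_slot *p;

	for (p = slots; p; p = p->next) {
		if (gfn >= p->base_gfn && gfn < p->base_gfn + p->nr_gfns) {
			unsigned long s = p->state[gfn - p->base_gfn];

			if (pfn)
				*pfn = s & ~GFN_FLAG_MASK;
			return (s & GFN_UVMEM_PFN) != 0;
		}
	}
	return false;
}

int main(void)
{
	unsigned long pfn;

	uvmem_slot_init(0x1000, 256);

	pthread_mutex_lock(&uvmem_lock);
	mark_gfn(0x1010, GFN_UVMEM_PFN, 42);
	if (gfn_is_uvmem_pfn(0x1010, &pfn))
		printf("gfn 0x1010 is backed by uvmem pfn %lu\n", pfn);
	pthread_mutex_unlock(&uvmem_lock);
	return 0;
}

Holding one lock over the whole list matches what the fragments above show: the callers (kvmppc_share_page, kvmppc_uv_migrate_mem_slot, kvmppc_h_svm_page_in, and so on) bracket every mark and lookup with mutex_lock(&kvm->arch.uvmem_lock)/mutex_unlock(), so per-gfn state is never observed half-updated while a page transitions between secure, shared, and normal memory.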