Lines matching refs: vsie_page (arch/s390/kvm/vsie.c, the s390 KVM vSIE implementation; each entry gives the source line number, the matching fragment, and the enclosing function)
26 struct vsie_page { struct
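The listing only shows the opening brace of struct vsie_page (line 26). The fields referenced by the matches below suggest roughly the following layout; field order, padding and offsets here are assumptions, and the BUILD_BUG_ON at line 1435 implies the whole struct is padded to exactly one page:

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* shadow block actually run by SIE */
	struct mcck_volatile_info mcck_info;	/* machine-check backup, see line 1169 */
	struct kvm_s390_sie_block *scb_o;	/* pinned original block provided by g2 */
	struct gmap *gmap;			/* shadow gmap currently in use */
	unsigned long fault_addr;		/* last fault reported to g2 */
	gpa_t sca_gpa;				/* guest addresses of the pinned */
	gpa_t itdba_gpa;			/* satellite blocks, remembered so */
	gpa_t gvrd_gpa;				/* they can be unpinned later */
	gpa_t riccbd_gpa;
	gpa_t sdnx_gpa;
	struct kvm_s390_crypto_cb crycb;	/* shadow crypto block, see line 365 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* shadow facility list, see line 1009 */
	/* reserved/padded so that sizeof(struct vsie_page) == PAGE_SIZE */
};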
65 static void prefix_unmapped(struct vsie_page *vsie_page) in prefix_unmapped() argument
67 atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20); in prefix_unmapped()
71 static void prefix_unmapped_sync(struct vsie_page *vsie_page) in prefix_unmapped_sync() argument
73 prefix_unmapped(vsie_page); in prefix_unmapped_sync()
74 if (vsie_page->scb_s.prog0c & PROG_IN_SIE) in prefix_unmapped_sync()
75 atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags); in prefix_unmapped_sync()
76 while (vsie_page->scb_s.prog0c & PROG_IN_SIE) in prefix_unmapped_sync()
81 static void prefix_mapped(struct vsie_page *vsie_page) in prefix_mapped() argument
83 atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20); in prefix_mapped()
87 static int prefix_is_mapped(struct vsie_page *vsie_page) in prefix_is_mapped() argument
89 return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST); in prefix_is_mapped()
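The four prefix helpers (lines 65-89) implement a small handshake: PROG_REQUEST in prog20 marks the shadow prefix as unmapped, and prefix_unmapped_sync additionally kicks the vCPU out of SIE via a STOP intervention request and spins until PROG_IN_SIE clears. A reconstruction from the fragments above; only the cpu_relax() in the wait loop is an assumption, since the loop body at line 76 is cut off in the listing:

/* mark the prefix as unmapped: SIE may no longer run until it is remapped */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the vCPU has left SIE */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();	/* assumed loop body */
}

/* mark the prefix as mapped: SIE may run again */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test whether the prefix is currently mapped */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}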
93 static void update_intervention_requests(struct vsie_page *vsie_page) in update_intervention_requests() argument
98 cpuflags = atomic_read(&vsie_page->scb_o->cpuflags); in update_intervention_requests()
99 atomic_andnot(bits, &vsie_page->scb_s.cpuflags); in update_intervention_requests()
100 atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags); in update_intervention_requests()
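Lines 93-100 copy the intervention-request bits from the original block into the shadow on every (re)entry, so stop/I/O/external requests raised against the original block by other vCPUs take effect in the shadow. The fragments give almost the full body; only the definition of the bit set is an assumption:

static void update_intervention_requests(struct vsie_page *vsie_page)
{
	/* assumed: the relevant intervention-request bits */
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}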
104 static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in prepare_cpuflags() argument
106 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in prepare_cpuflags()
107 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in prepare_cpuflags()
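prepare_cpuflags (line 104) validates the cpuflags supplied by g2 and builds a sanitized set for the shadow block; invalid combinations come back as a validity intercept. A trimmed sketch, assuming set_validity_icpt(), the CPUSTAT_* flags and test_kvm_facility() behave as elsewhere in s390 KVM; the full list of forwarded flags is elided:

static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* only z/Architecture guests are supported */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);	/* assumed code */

	newflags = CPUSTAT_ZARCH;
	/* forward selected flags only if the matching facility is available */
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	/* ... further facility-gated flags elided ... */

	/* intervention requests are merged in later, on every entry */
	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}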
295 static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in shadow_crycb() argument
297 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in shadow_crycb()
298 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in shadow_crycb()
327 ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr, in shadow_crycb()
344 vsie_page->crycb.dea_wrapping_key_mask, 56)) in shadow_crycb()
351 b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask; in shadow_crycb()
365 scb_s->crycbd = (u32)virt_to_phys(&vsie_page->crycb) | CRYCB_FORMAT2; in shadow_crycb()
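shadow_crycb (line 295) builds a host-owned crypto control block inside the vsie_page: the APCB is set up from g2's block (line 327), g3's DEA wrapping key mask is read and combined with g2's (lines 344, 351), and crycbd in the shadow block is pointed at the host copy (line 365). A sketch of the key-mask path only; the +72 offset, the 0x0035U validity code and the format-2 context are assumptions:

	/* excerpt from shadow_crycb(); declarations pulled up for clarity */
	unsigned long *b1, *b2;

	/* fetch g3's DEA wrapping key mask from g2 memory (56 bytes) */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	/* fold g2's own mask in by xor; 56 % 8 == 0, so nothing is clobbered */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);

	/* the shadow block points at the host-owned copy, never at g2 memory */
	scb_s->crycbd = (u32)virt_to_phys(&vsie_page->crycb) | CRYCB_FORMAT2;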
370 static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in prepare_ibc() argument
372 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in prepare_ibc()
373 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in prepare_ibc()
393 static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in unshadow_scb() argument
395 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in unshadow_scb()
396 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in unshadow_scb()
444 static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in shadow_scb() argument
446 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in shadow_scb()
447 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in shadow_scb()
466 rc = prepare_cpuflags(vcpu, vsie_page); in shadow_scb()
502 prefix_unmapped(vsie_page); in shadow_scb()
529 prefix_unmapped(vsie_page); in shadow_scb()
572 prepare_ibc(vcpu, vsie_page); in shadow_scb()
573 rc = shadow_crycb(vcpu, vsie_page); in shadow_scb()
576 unshadow_scb(vcpu, vsie_page); in shadow_scb()
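shadow_scb (line 444) is the central translation step: it validates and copies g2's control block into the shadow, marks the prefix unmapped wherever a field affecting prefix handling changes (lines 502, 529), then finishes with prepare_ibc and shadow_crycb; on failure everything is rolled back through unshadow_scb (line 576). A control-flow skeleton only, with the many field copies elided:

static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc;

	/* ... copy/sanitize intercept controls, timers, PSW, etc. ... */

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* any change to mso/msl/prefix requires a fresh prefix mapping */
	prefix_unmapped(vsie_page);
	/* ... more facility-gated feature copies ... */

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}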
584 struct vsie_page *cur; in kvm_s390_vsie_gmap_notifier()
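kvm_s390_vsie_gmap_notifier (the function owning the local `cur` at line 584) runs when mappings in a shadow gmap are invalidated: it scans the per-VM vsie_page cache and, for any entry whose shadow gmap and prefix range intersect the invalidated range, forces the vCPU out of SIE via prefix_unmapped_sync. A sketch; the kvm->arch.vsie.pages[]/page_count cache fields and GUEST_PREFIX_SHIFT are assumptions beyond the listed fragments:

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	/* assumed cache layout: a small array of vsie pages per VM */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		prefix += cur->scb_s.mso;	/* prefix lies at offset mso */
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}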
622 static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in map_prefix() argument
624 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in map_prefix()
628 if (prefix_is_mapped(vsie_page)) in map_prefix()
632 prefix_mapped(vsie_page); in map_prefix()
637 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL); in map_prefix()
639 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in map_prefix()
646 prefix_unmapped(vsie_page); in map_prefix()
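map_prefix (line 622) faults the g3 prefix page(s) in through the shadow gmap before SIE is entered. It marks the prefix as mapped first, so a concurrent unmap by the gmap notifier cannot be lost, and it also maps the second prefix page when transactional execution (ECB_TE) is active (line 639). A reconstruction from the fragments; the GUEST_PREFIX_SHIFT arithmetic and the 0x0037U validity code are assumptions:

static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;	/* assumed */
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped first, so concurrent unmappers are not lost */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at an offset within g2 memory */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE, NULL);
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);	/* assumed code */
	return rc;
}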
679 static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in unpin_blocks() argument
681 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in unpin_blocks()
686 unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa); in unpin_blocks()
687 vsie_page->sca_gpa = 0; in unpin_blocks()
694 unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa); in unpin_blocks()
695 vsie_page->itdba_gpa = 0; in unpin_blocks()
701 unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa); in unpin_blocks()
702 vsie_page->gvrd_gpa = 0; in unpin_blocks()
708 unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa); in unpin_blocks()
709 vsie_page->riccbd_gpa = 0; in unpin_blocks()
715 unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa); in unpin_blocks()
716 vsie_page->sdnx_gpa = 0; in unpin_blocks()
735 static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in pin_blocks() argument
737 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in pin_blocks()
738 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in pin_blocks()
761 vsie_page->sca_gpa = gpa; in pin_blocks()
778 vsie_page->itdba_gpa = gpa; in pin_blocks()
797 vsie_page->gvrd_gpa = gpa; in pin_blocks()
814 vsie_page->riccbd_gpa = gpa; in pin_blocks()
843 vsie_page->sdnx_gpa = gpa; in pin_blocks()
848 unpin_blocks(vcpu, vsie_page); in pin_blocks()
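pin_blocks and unpin_blocks (lines 735, 679) pin each satellite block that the g2 block points to (SCA, ITDB, guest vector save area, RICCB, SDNX), store the host address in the shadow block and the guest address in the vsie_page for later unpinning; a mid-way failure unwinds through unpin_blocks (line 848). A sketch of the pattern for one block, the ITDB, assuming pin_guest_page() returns the host address through an hpa_t pointer and 0x0080U stands in for the real validity code:

	/* excerpt from pin_blocks(): the ITDB as one instance of the pattern */
	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		/* the 256-byte ITDB cannot cross a page boundary */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);	/* assumed */
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;		/* for later unpinning */
		scb_s->itdba = hpa + (gpa & ~PAGE_MASK); /* host address for SIE */
	}

	/* excerpt from unpin_blocks(): the matching teardown */
	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}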
853 static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, in unpin_scb() argument
856 hpa_t hpa = (hpa_t) vsie_page->scb_o; in unpin_scb()
860 vsie_page->scb_o = NULL; in unpin_scb()
869 static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, in pin_scb() argument
881 vsie_page->scb_o = phys_to_virt(hpa); in pin_scb()
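pin_scb (line 869) pins the single page holding g2's control block and stores its host-virtual address in vsie_page->scb_o (line 881); unpin_scb reverses this. A sketch assuming the same pin_guest_page()/unpin_guest_page() helpers as pin_blocks; injecting an addressing exception on failure matches usual s390 KVM practice but is an assumption here:

static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		/* assumed: report the bad address to g2 */
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = phys_to_virt(hpa);
	return 0;
}

static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	/* the cast relies on the kernel's 1:1 linear mapping (line 856) */
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}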
921 static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in handle_fault() argument
930 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in handle_fault()
937 vsie_page->fault_addr = current->thread.gmap_addr; in handle_fault()
949 struct vsie_page *vsie_page) in handle_last_fault() argument
951 if (vsie_page->fault_addr) in handle_last_fault()
952 kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in handle_last_fault()
953 vsie_page->fault_addr, NULL); in handle_last_fault()
954 vsie_page->fault_addr = 0; in handle_last_fault()
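handle_fault (line 921) resolves a shadow-table fault raised while g3 was running: the faulting g2 address in current->thread.gmap_addr is pushed through kvm_s390_shadow_fault, and when a fault is injected into g3 the address is remembered (line 937) so handle_last_fault can map it again before the next SIE entry, which matters for instructions such as MVPG that touch two addresses. A reconstruction; the inject_fault() helper and the gmap_write_flag field are assumptions beyond the listed fragments:

static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr, NULL);
	if (rc > 0) {
		/* assumed helper: forward the DAT fault to the guest */
		rc = inject_fault(vcpu, rc, current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/* map the last fault again on entry, in case g2 handled it meanwhile */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr, NULL);
	vsie_page->fault_addr = 0;
}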
957 static inline void clear_vsie_icpt(struct vsie_page *vsie_page) in clear_vsie_icpt() argument
959 vsie_page->scb_s.icptcode = 0; in clear_vsie_icpt()
963 static void retry_vsie_icpt(struct vsie_page *vsie_page) in retry_vsie_icpt() argument
965 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in retry_vsie_icpt()
975 clear_vsie_icpt(vsie_page); in retry_vsie_icpt()
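retry_vsie_icpt (line 963) rewinds the shadow PSW so the intercepted instruction is re-executed by g3 after the host has fixed things up, then clears the intercept (line 975). A reconstruction; insn_length(), __rewind_psw() and the EXECUTE handling via icptstatus are assumptions, modeled on how s390 KVM rewinds PSWs elsewhere:

static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}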
985 static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in handle_stfle() argument
987 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in handle_stfle()
988 __u32 fac = READ_ONCE(vsie_page->scb_o->fac); in handle_stfle()
995 retry_vsie_icpt(vsie_page); in handle_stfle()
1006 if (read_guest_real(vcpu, fac, &vsie_page->fac, in handle_stfle()
1009 scb_s->fac = (u32)virt_to_phys(&vsie_page->fac); in handle_stfle()
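handle_stfle (line 985) handles a g3 STFLE intercept when g2 asked for interpretive execution: the facility-list designation is read from the original block with READ_ONCE (line 988, since other vCPUs may change it concurrently), the PSW is rewound so STFLE is retried (line 995), the g2 facility list is copied into vsie_page->fac, and the shadow block is pointed at the host copy (line 1009). A sketch; the mask applied to the facility-list designation and the 0x1090U validity code are assumptions:

static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac);

	/* facility-list interpretation requested and supported? */
	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		/* assumed mask: the facility-list origin within the FLD */
		fac &= 0x7ffffff8U;
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (u32)virt_to_phys(&vsie_page->fac);
	}
	return 0;
}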
1021 static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg) in vsie_get_register() argument
1027 return vsie_page->scb_s.gg15; in vsie_get_register()
1029 return vsie_page->scb_s.gg14; in vsie_get_register()
1035 static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in vsie_handle_mvpg() argument
1037 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in vsie_handle_mvpg()
1039 u64 *pei_block = &vsie_page->scb_o->mcic; in vsie_handle_mvpg()
1048 dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask; in vsie_handle_mvpg()
1050 src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask; in vsie_handle_mvpg()
1053 rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest); in vsie_handle_mvpg()
1054 rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src); in vsie_handle_mvpg()
1060 retry_vsie_icpt(vsie_page); in vsie_handle_mvpg()
1071 clear_vsie_icpt(vsie_page); in vsie_handle_mvpg()
1094 retry_vsie_icpt(vsie_page); in vsie_handle_mvpg()
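vsie_get_register (line 1021) reads a g3 general register for instruction emulation: while SIE runs, r14 and r15 live in the shadow block as gg14/gg15 (lines 1027-1029), the rest come from the vcpu's register file (the default case below is an assumption). vsie_handle_mvpg (line 1035) uses it to decode both MVPG operands from the IPB (lines 1048, 1050), resolves both pages through the shadow gmap (lines 1053-1054), and then either rewinds and retries the instruction, forwards a partial-execution intercept to g2 with the PEI values stored at scb_o->mcic (line 1039), or injects the fault into g3:

static u64 vsie_get_register(struct kvm_vcpu *vcpu,
			     struct vsie_page *vsie_page, u8 reg)
{
	reg &= 0xf;
	switch (reg) {
	case 15:
		return vsie_page->scb_s.gg15;
	case 14:
		return vsie_page->scb_s.gg14;
	default:
		return vcpu->run->s.regs.gprs[reg];	/* assumed */
	}
}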
1114 static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in do_vsie_run() argument
1118 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in do_vsie_run()
1119 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in do_vsie_run()
1123 handle_last_fault(vcpu, vsie_page); in do_vsie_run()
1169 kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info); in do_vsie_run()
1176 return handle_fault(vcpu, vsie_page); in do_vsie_run()
1181 rc = handle_stfle(vcpu, vsie_page); in do_vsie_run()
1186 clear_vsie_icpt(vsie_page); in do_vsie_run()
1194 rc = vsie_handle_mvpg(vcpu, vsie_page); in do_vsie_run()
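do_vsie_run (line 1114) is the actual SIE entry: it first replays the last unresolved fault (line 1123), runs the shadow block, reinjects any machine check saved in mcck_info (line 1169), and then dispatches on the intercept: a host fault goes to handle_fault (line 1176), STFLE to handle_stfle (line 1181), a STOP intercept that g2 did not request is simply cleared (line 1186), and a partial-execution intercept for MVPG goes to vsie_handle_mvpg (line 1194). A dispatch skeleton; the ipa opcode guards and the exact rc conventions are assumptions:

	/* excerpt from do_vsie_run(), after sie64a() ran the shadow block */
	if (rc > 0)
		rc = 0;					/* there may still be an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);	/* shadow table fault */

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)		/* STFLE */
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2? then it was only meant for us */
		if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_PARTEXEC:
		if (scb_s->ipa == 0xd200)		/* MVPG */
			rc = vsie_handle_mvpg(vcpu, vsie_page);
		break;
	}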
1200 static void release_gmap_shadow(struct vsie_page *vsie_page) in release_gmap_shadow() argument
1202 if (vsie_page->gmap) in release_gmap_shadow()
1203 gmap_put(vsie_page->gmap); in release_gmap_shadow()
1204 WRITE_ONCE(vsie_page->gmap, NULL); in release_gmap_shadow()
1205 prefix_unmapped(vsie_page); in release_gmap_shadow()
1209 struct vsie_page *vsie_page) in acquire_gmap_shadow() argument
1226 if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) { in acquire_gmap_shadow()
1232 release_gmap_shadow(vsie_page); in acquire_gmap_shadow()
1237 WRITE_ONCE(vsie_page->gmap, gmap); in acquire_gmap_shadow()
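acquire_gmap_shadow (line 1209) either reuses the existing shadow gmap, if its ASCE and EDAT level still match what g2 currently uses (line 1226), or drops it (line 1232) and creates a fresh one, publishing it with WRITE_ONCE (line 1237) because the gmap notifier reads the pointer locklessly. A reconstruction; gmap_shadow() and the ctlreg0-based EDAT derivation are assumptions beyond the listed fragments:

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/* ASCE and EDAT unchanged and shadow still intact? then reuse it */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow, if any, and mark the prefix unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);	/* assumed */
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}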
1245 struct vsie_page *vsie_page) in register_shadow_scb() argument
1247 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in register_shadow_scb()
1249 WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s); in register_shadow_scb()
1288 static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in vsie_run() argument
1290 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in vsie_run()
1294 rc = acquire_gmap_shadow(vcpu, vsie_page); in vsie_run()
1296 rc = map_prefix(vcpu, vsie_page); in vsie_run()
1298 gmap_enable(vsie_page->gmap); in vsie_run()
1299 update_intervention_requests(vsie_page); in vsie_run()
1300 rc = do_vsie_run(vcpu, vsie_page); in vsie_run()
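vsie_run (line 1288) is the outer loop: (re)acquire the shadow gmap, map the prefix, switch to the shadow gmap, merge intervention requests, and enter SIE, repeating until there is a real intercept for g2, an error, a pending signal or an interrupt for the vcpu. A loop skeleton; the exit conditions beyond rc and icptcode are assumptions:

	/* excerpt from vsie_run() */
	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);	/* back to g2's gmap */
		}

		if (rc == -EAGAIN)			/* retry silently */
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0))
			break;
		cond_resched();
	}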
1355 static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr) in get_vsie_page()
1357 struct vsie_page *vsie_page; in get_vsie_page() local
1407 vsie_page = page_to_virt(page); in get_vsie_page()
1408 memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block)); in get_vsie_page()
1409 release_gmap_shadow(vsie_page); in get_vsie_page()
1410 vsie_page->fault_addr = 0; in get_vsie_page()
1411 vsie_page->scb_s.ihcpu = 0xffffU; in get_vsie_page()
1412 return vsie_page; in get_vsie_page()
1416 static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page) in put_vsie_page() argument
1418 struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT); in put_vsie_page()
1425 struct vsie_page *vsie_page; in kvm_s390_handle_vsie() local
1435 BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE); in kvm_s390_handle_vsie()
1448 vsie_page = get_vsie_page(vcpu->kvm, scb_addr); in kvm_s390_handle_vsie()
1449 if (IS_ERR(vsie_page)) in kvm_s390_handle_vsie()
1450 return PTR_ERR(vsie_page); in kvm_s390_handle_vsie()
1451 else if (!vsie_page) in kvm_s390_handle_vsie()
1455 rc = pin_scb(vcpu, vsie_page, scb_addr); in kvm_s390_handle_vsie()
1458 rc = shadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1461 rc = pin_blocks(vcpu, vsie_page); in kvm_s390_handle_vsie()
1464 register_shadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1465 rc = vsie_run(vcpu, vsie_page); in kvm_s390_handle_vsie()
1467 unpin_blocks(vcpu, vsie_page); in kvm_s390_handle_vsie()
1469 unshadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1471 unpin_scb(vcpu, vsie_page, scb_addr); in kvm_s390_handle_vsie()
1473 put_vsie_page(vcpu->kvm, vsie_page); in kvm_s390_handle_vsie()
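kvm_s390_handle_vsie, the handler for g2's SIE instruction, strings everything together; the fragments from lines 1448-1473 give the full unwind order. A skeleton of the sequence with the error handling compressed; the unregister_shadow_scb() counterpart to line 1464 is an assumption:

	/* excerpt from kvm_s390_handle_vsie() */
	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		return 0;	/* scb already in use by another vcpu: do nothing */

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (!rc) {
		rc = shadow_scb(vcpu, vsie_page);
		if (!rc) {
			rc = pin_blocks(vcpu, vsie_page);
			if (!rc) {
				register_shadow_scb(vcpu, vsie_page);
				rc = vsie_run(vcpu, vsie_page);
				unregister_shadow_scb(vcpu);	/* assumed */
				unpin_blocks(vcpu, vsie_page);
			}
			unshadow_scb(vcpu, vsie_page);
		}
		unpin_scb(vcpu, vsie_page, scb_addr);
	}
	put_vsie_page(vcpu->kvm, vsie_page);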
1488 struct vsie_page *vsie_page; in kvm_s390_vsie_destroy() local
1496 vsie_page = page_to_virt(page); in kvm_s390_vsie_destroy()
1497 release_gmap_shadow(vsie_page); in kvm_s390_vsie_destroy()