// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_FAULT_H__
#define __ARM64_KVM_HYP_FAULT_H__

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	int ret;
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg_par();
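	/*
	 * AT S1E1A (used when FEAT_S1POE is present) translates the VA
	 * without performing stage-1 permission checks, presumably so
	 * that a restrictive permission overlay cannot make the walk
	 * fail for an access the guest was otherwise allowed to make.
	 */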
	ret = system_supports_poe() ? __kvm_at(OP_AT_S1E1A, far) :
				      __kvm_at(OP_AT_S1E1R, far);
	if (!ret)
		tmp = read_sysreg_par();
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
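	/*
	 * PAR_EL1 reports the output address page in bits [47:12]
	 * (plus the upper bits for larger PAs), whereas HPFAR_EL2
	 * wants the faulting IPA page in FIPA, which starts at bit 4;
	 * PAR_TO_HPFAR() essentially masks the page-frame bits and
	 * shifts them right by 8 to match that layout.
	 */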
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

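/*
 * Snapshot the fault information (faulting VA and HPFAR) for the current
 * stage-2 abort. Returns false when the IPA could not be resolved, in
 * which case the caller is expected to go back to the guest so the fault
 * can be taken again.
 */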
static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
	u64 hpfar, far;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     esr_fsc_is_permission_fault(esr))) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	fault->far_el2 = far;
	fault->hpfar_el2 = hpfar;
	return true;
}

#endif