// SPDX-License-Identifier: GPL-2.0-only

/*
 * Privileged (non-hypervisor) host registers to save.
 */
#include "asm/guest-state-buffer.h"

struct p9_host_os_sprs {
	unsigned long iamr;
	unsigned long amr;

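	/* Host PMU state (counters, control and sampling SPRs). */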
	unsigned int pmc1;
	unsigned int pmc2;
	unsigned int pmc3;
	unsigned int pmc4;
	unsigned int pmc5;
	unsigned int pmc6;
	unsigned long mmcr0;
	unsigned long mmcr1;
	unsigned long mmcr2;
	unsigned long mmcr3;
	unsigned long mmcra;
	unsigned long siar;
	unsigned long sier1;
	unsigned long sier2;
	unsigned long sier3;
	unsigned long sdar;
};

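/*
 * A guest may only run nested guests if the nested capability has been
 * enabled for it and it is using the radix MMU.
 */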
static inline bool nesting_enabled(struct kvm *kvm)
{
	return kvm->arch.nested_enable && kvm_is_radix(kvm);
}

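/*
 * P9 entry/exit helpers: load_vcpu_state()/store_vcpu_state() move guest
 * register state in and out, save_p9_host_os_sprs()/restore_p9_host_os_sprs()
 * preserve the host OS SPRs listed above, and switch_pmu_to_guest()/
 * switch_pmu_to_host() hand the PMU between host and guest context.
 */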
bool load_vcpu_state(struct kvm_vcpu *vcpu,
			   struct p9_host_os_sprs *host_os_sprs);
void store_vcpu_state(struct kvm_vcpu *vcpu);
void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs);
void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
				    struct p9_host_os_sprs *host_os_sprs);
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			    struct p9_host_os_sprs *host_os_sprs);
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			    struct p9_host_os_sprs *host_os_sprs);

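/*
 * Entry/exit timing: when CONFIG_KVM_BOOK3S_HV_P9_TIMING is set,
 * accumulate_time() closes the current timebase accumulator and opens
 * @next (NULL ends timing); otherwise these compile away to no-ops.
 */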
#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
#define start_timing(vcpu, next) accumulate_time(vcpu, next)
#define end_timing(vcpu) accumulate_time(vcpu, NULL)
#else
#define accumulate_time(vcpu, next) do {} while (0)
#define start_timing(vcpu, next) do {} while (0)
#define end_timing(vcpu) do {} while (0)
#endif

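/*
 * MSR is kept in the shared register area; writes must mark the guest
 * state element dirty and reads must reload the cached value so the
 * nestedv2 guest state buffer stays coherent.
 */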
static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.shregs.msr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}

static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
	return vcpu->arch.shregs.msr;
}

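/*
 * Generate kvmppc_get_<reg>_hv()/kvmppc_set_<reg>_hv() accessors for
 * registers held in vcpu->arch. Setters mark the corresponding guest state
 * ID dirty, getters reload the cached value, keeping the local copy in sync
 * with the nestedv2 guest state buffer. The _ARRAY_ variants take an index
 * and a guest state ID macro parameterised by that index.
 */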
#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.reg = val;						\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu)	\
{									\
	kvmhv_nestedv2_cached_reload(vcpu, iden);			\
	return vcpu->arch.reg;						\
}

#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden)		\

#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden)	\
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val)	\
{									\
	vcpu->arch.reg[i] = val;					\
	kvmhv_nestedv2_mark_dirty(vcpu, iden(i));			\
}

#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden)	\
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i)	\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0);	\
	return vcpu->arch.reg[i];					\
}

#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden)		\
	KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden)	\
	KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden)	\

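/*
 * Per-SPR accessors. For example, KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64,
 * KVMPPC_GSID_DSCR) generates kvmppc_set_dscr_hv() and kvmppc_get_dscr_hv()
 * operating on vcpu->arch.dscr.
 */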
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dexcr, 64, KVMPPC_GSID_DEXCR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashkeyr, 64, KVMPPC_GSID_HASHKEYR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashpkeyr, 64, KVMPPC_GSID_HASHPKEYR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL)
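/* MMCR, SIER and the PMCs are arrays in vcpu->arch, hence indexed accessors. */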
KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR)
KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER)
KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC)

KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB)