/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/guest-state-buffer.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
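
/*
 * Guest segment IDs hash into the power-of-two sid_map table to find
 * their shadow VSID.  A hedged, illustrative lookup (the actual hash
 * and lookup live in the PR KVM MMU host code, not in this header):
 *
 *	map = &to_book3s(vcpu)->sid_map[hash & SID_MAP_MASK];
 *	if (map->valid && map->guest_vsid == gvsid)
 *		return map;
 */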

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
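
/*
 * Hedged sketch of the lock-free entry claim that the entry_exit_map
 * layout above makes possible.  This helper is illustrative only (the
 * name is ours, not kernel API); it assumes cmpxchg() is visible via
 * the usual include chain and that 'thread' is this CPU's thread index
 * within the vcore (0..7).
 */
static inline bool kvmppc_vcore_try_enter(struct kvmppc_vcore *vc, int thread)
{
	int map;

	do {
		map = vc->entry_exit_map;
		if (map >> 8)	/* some thread already exited: too late */
			return false;
	} while (cmpxchg(&vc->entry_exit_map, map, map | (1 << thread)) != map);

	return true;	/* our entry bit is set and no exit had begun */
}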

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					gva_t eaddr, void *to, void *from,
					unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
				    bool writing, unsigned long gpa,
				    u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				unsigned long gpa,
				struct kvm_memory_slot *memslot,
				bool writing, bool kvm_ro,
				pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			       unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
			unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_flush_lpid(u64 lpid);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);


#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

extern struct static_key_false __kvmhv_is_nestedv2;

static inline bool kvmhv_is_nestedv2(void)
{
	return static_branch_unlikely(&__kvmhv_is_nestedv2);
}

static inline bool kvmhv_is_nestedv1(void)
{
	return !static_branch_likely(&__kvmhv_is_nestedv2);
}

#else

static inline bool kvmhv_is_nestedv2(void)
{
	return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
	return false;
}

#endif

int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_cached_reload(vcpu, iden);
	return 0;
}

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
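/*
 * The guest's memory accesses need byte swapping when its current
 * MSR_LE setting differs from the endianness the host kernel runs in
 * (the MSR_LE bit of MSR_KERNEL).
 */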
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
	return vcpu->arch.fp.fpscr;
}

static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.fp.fpscr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}


static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
				      u64 val)
{
	vcpu->arch.fp.fpr[i][j] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
	*v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
				     vector128 *val)
{
	vcpu->arch.vr.vr[i] = *val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}

static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
	return vcpu->arch.vr.vscr.u[3];
}

static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.vr.vscr.u[3] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.reg = val;						\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.reg;						\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\

KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
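
/*
 * For example, KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
 * above expands to the pair:
 *
 *	static inline void kvmppc_set_tar(struct kvm_vcpu *vcpu, u64 val)
 *	{
 *		vcpu->arch.tar = val;
 *		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_TAR);
 *	}
 *
 *	static inline u64 kvmppc_get_tar(struct kvm_vcpu *vcpu)
 *	{
 *		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TAR) < 0);
 *		return vcpu->arch.tar;
 *	}
 */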


#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.vcore->reg = val;					\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.vcore->reg;					\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\


KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)

static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vcore->tb_offset;
}

static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
	return vcpu->arch.dec_expires;
}

static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.dec_expires = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

/*
 * Expiry time of vcpu DEC relative to host TB.  dec_expires is kept in
 * guest timebase units, and tb_offset = guest timebase - host timebase,
 * so subtracting the offset converts the expiry to the host timebase.
 */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008
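
/*
 * Encoding check: both are primary opcode 31 X-form instructions.
 * INS_DCBZ: 0x7c000000 | (1014 << 1) = dcbz 0,0 (XO = 1014).
 * INS_TW:   0x7c000000 | (31 << 21) | (4 << 1) = tw 31,0,0 (XO = 4),
 * i.e. the "trap" extended mnemonic.
 */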

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using
 * a stride of 8 and 1 thread per core, so the remaining offsets of 1, 5, 3
 * and 7 must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
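
/*
 * Worked example with illustrative numbers (say KVM_MAX_VCPUS = 2048 and
 * emul_smt_mode = 8): id = 2050 falls in block 1, so
 * packed_id = (2050 % 2048) + block_offsets[1] = 2 + 4 = 6.
 * Packed ID 6 cannot collide with a directly used ID 6, because a guest
 * handing out block-1 IDs leaves the second half of each VCORE empty.
 */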

#endif /* __ASM_KVM_BOOK3S_H__ */