// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/lppaca.h>
#include <asm/opal.h>
#include <asm/mce.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/hmi.h>
#include <asm/kvm_ppc.h>

/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR		(1ul << (63-42))
#define SRR1_MC_IFETCH_SH	(63-45)
#define SRR1_MC_IFETCH_MASK	0x7
#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 */
#define DSISR_MC_DERAT_MULTI	0x800		/* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI	0x400		/* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY	0x100		/* SLB parity error */
#define DSISR_MC_SLB_MULTI	0x080		/* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI	0x040		/* SLB parity + multi-hit */
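
/*
 * Illustrative sketch (not part of the handlers below): load/store
 * machine checks report details in DSISR, while instruction-fetch
 * machine checks encode a 3-bit cause in SRR1.  The helper name is
 * hypothetical; the bit tests mirror what kvmppc_realmode_mc_power7()
 * does.
 *
 *	static bool mc_was_slb_error(unsigned long srr1, unsigned long dsisr)
 *	{
 *		unsigned long ifetch;
 *
 *		if (srr1 & SRR1_MC_LDSTERR)
 *			return dsisr & (DSISR_MC_SLB_PARITY |
 *					DSISR_MC_SLB_MULTI |
 *					DSISR_MC_SLB_PARMULTI);
 *
 *		ifetch = (srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK;
 *		return ifetch == SRR1_MC_IFETCH_SLBPAR ||
 *		       ifetch == SRR1_MC_IFETCH_SLBMULTI ||
 *		       ifetch == SRR1_MC_IFETCH_SLBPARMULTI;
 *	}
 */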

/* POWER7 SLB flush and reload */
static void reload_slb(struct kvm_vcpu *vcpu)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* First clear out SLB */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

	/* Do they have an SLB shadow buffer registered? */
	slb = vcpu->arch.slb_shadow.pinned_addr;
	if (!slb)
		return;

	/* Sanity check */
	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
		return;

	/* Load up the SLB from that */
	for (i = 0; i < n; ++i) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}
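
/*
 * For reference: in the slbmte above, RS carries the VSID and segment
 * flags straight from the shadow save area, while RB carries the ESID
 * and valid bit in its upper bits and the SLB entry index in its low
 * 12 bits.  That is why the loop does (rb & ~0xFFFul) | i: it discards
 * whatever index the guest left in the save area and substitutes the
 * slot number actually being loaded.
 */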

/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
	unsigned long srr1 = vcpu->arch.shregs.msr;
	long handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* error on load/store */
		unsigned long dsisr = vcpu->arch.shregs.dsisr;

		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
			/* flush and reload SLB; flushes D-ERAT too */
			reload_slb(vcpu);
			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
		}
		if (dsisr & DSISR_MC_TLB_MULTI) {
			tlbiel_all_lpid(vcpu->kvm->arch.radix);
			dsisr &= ~DSISR_MC_TLB_MULTI;
		}
		/* Any other errors we don't understand? */
		if (dsisr & 0xffffffffUL)
			handled = 0;
	}

	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:
		break;
	case SRR1_MC_IFETCH_SLBPAR:
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
		reload_slb(vcpu);
		break;
	case SRR1_MC_IFETCH_TLBMULTI:
		tlbiel_all_lpid(vcpu->kvm->arch.radix);
		break;
	default:
		handled = 0;
	}

	return handled;
}

void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
	struct machine_check_event mce_evt;
	long handled;

	if (vcpu->kvm->arch.fwnmi_enabled) {
		/* FWNMI guests handle their own recovery */
		handled = 0;
	} else {
		handled = kvmppc_realmode_mc_power7(vcpu);
	}

	/*
	 * Now get the event and stash it in the vcpu struct so it can
	 * be handled by the primary thread in virtual mode.  We can't
	 * call machine_check_queue_event() here if we are running on
	 * an offline secondary thread.
	 */
	if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
		if (handled && mce_evt.version == MCE_V1)
			mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
	} else {
		memset(&mce_evt, 0, sizeof(mce_evt));
	}

	vcpu->arch.mce_evt = mce_evt;
}


long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	long ret = 0;

	/*
	 * Unapply and clear the offset first. That way, if the TB was not
	 * resynced then it will remain in host-offset, and if it was resynced
	 * then it is brought into host-offset. Then the tb offset is
	 * re-applied before continuing with the KVM exit.
	 *
	 * This way, we don't need to actually know whether or not OPAL
	 * resynced the timebase or do any of the complicated dance that
	 * the P7/8 path requires.
	 */
	if (vc->tb_offset_applied) {
		u64 new_tb = mftb() - vc->tb_offset_applied;
		mtspr(SPRN_TBU40, new_tb);
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		vc->tb_offset_applied = 0;
	}
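
	/*
	 * A note on the mtspr(SPRN_TBU40, ...) sequence above (repeated
	 * when the offset is re-applied below): TBU40 writes only the
	 * upper 40 bits of the timebase, leaving the low 24 bits ticking
	 * undisturbed.  If those low 24 bits wrap between the mftb() that
	 * computed new_tb and the mtspr(), the carry out of bit 24 is
	 * lost and the timebase ends up 2^24 ticks in the past; comparing
	 * the live low bits against new_tb detects this, and the upper
	 * bits are rewritten bumped by one (+ 0x1000000) to compensate.
	 */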

	local_paca->hmi_irqs++;

	if (hmi_handle_debugtrig(NULL) >= 0) {
		ret = 1;
		goto out;
	}

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(NULL);

out:
	if (kvmppc_get_tb_offset(vcpu)) {
		u64 new_tb = mftb() + vc->tb_offset;
		mtspr(SPRN_TBU40, new_tb);
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		vc->tb_offset_applied = kvmppc_get_tb_offset(vcpu);
	}

	return ret;
}

/*
 * The following subcore HMI handling is all only for pre-POWER9 CPUs.
 */

/* Check if dynamic split is in force and return subcore size accordingly. */
static inline int kvmppc_cur_subcore_size(void)
{
	if (local_paca->kvm_hstate.kvm_split_mode)
		return local_paca->kvm_hstate.kvm_split_mode->subcore_size;

	return threads_per_subcore;
}
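
/*
 * Worked example: on an 8-thread core split into two 4-thread subcores,
 * kvmppc_cur_subcore_size() returns 4, so core thread 5 maps to subcore
 * 5 / 4 = 1 in the functions below.  Unsplit, it returns
 * threads_per_subcore and every thread of the core maps to subcore 0.
 */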

void kvmppc_subcore_enter_guest(void)
{
	int thread_id, subcore_id;

	thread_id = cpu_thread_in_core(local_paca->paca_index);
	subcore_id = thread_id / kvmppc_cur_subcore_size();

	local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
}
EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);

void kvmppc_subcore_exit_guest(void)
{
	int thread_id, subcore_id;

	thread_id = cpu_thread_in_core(local_paca->paca_index);
	subcore_id = thread_id / kvmppc_cur_subcore_size();

	local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);

static bool kvmppc_tb_resync_required(void)
{
	if (test_and_set_bit(CORE_TB_RESYNC_REQ_BIT,
				&local_paca->sibling_subcore_state->flags))
		return false;

	return true;
}

static void kvmppc_tb_resync_done(void)
{
	clear_bit(CORE_TB_RESYNC_REQ_BIT,
			&local_paca->sibling_subcore_state->flags);
}
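
/*
 * The pair above is a simple leader election: test_and_set_bit() is
 * atomic, and sibling_subcore_state is shared by all threads of the
 * core, so exactly one caller sees the bit go 0->1 and gets true,
 * taking responsibility for the timebase resync; every other caller
 * finds the bit already set and gets false.  The winner drops the bit
 * again via kvmppc_tb_resync_done() once opal_resync_timebase() has
 * completed.
 */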

/*
 * kvmppc_realmode_hmi_handler() is called only by the primary thread
 * during the guest exit path.
 *
 * There are multiple reasons why an HMI could occur, one of them being a
 * Timebase (TB) error. If the HMI is due to a TB error, the TB would have
 * been in a stopped state. The opal hmi handler will fix it and restore
 * the TB with the host timebase value. For HMIs caused by non-TB errors,
 * the opal hmi handler will not touch/restore the TB register and hence
 * there won't be any change in the TB value.
 *
 * Since we are not sure about the cause of this HMI, we can't be sure
 * whether the TB register holds the guest or the host timebase value.
 * Hence the idea is to resync the TB on every HMI, so that we know the
 * exact state of the TB value. The resync TB call will restore the TB to
 * the host timebase.
 *
 * Things to consider:
 * - On a TB error, the HMI interrupt is reported on all the threads of
 *   the core that encountered the TB error, irrespective of split-core
 *   mode.
 * - The very first thread on the core that gets a chance to fix the TB
 *   error would resync the TB with the local chipTOD value.
 * - The TB resync is a core-level action, i.e. it will sync all the TBs
 *   in that core independent of split-core mode. This means that if we
 *   trigger a TB sync from a thread in one subcore, it will affect the
 *   TB values of sibling subcores of the same core.
 *
 * All threads need to co-ordinate before making the opal hmi handler
 * call. All threads will use sibling_subcore_state->in_guest[] (shared
 * by all threads in the core) in the paca, which holds information about
 * whether sibling subcores are in guest mode or host mode. The
 * in_guest[] array is of size MAX_SUBCORE_PER_CORE=4, indexed by subcore
 * id to set/unset the subcore status. Only the primary thread from each
 * subcore is responsible for setting/unsetting its designated array
 * element while entering/exiting the guest.
 *
 * After invoking the opal hmi handler call, one of the threads (of the
 * entire core) will need to resync the TB. Bit 63 of the subcore state
 * flags bitmap (sibling_subcore_state->flags) is used to co-ordinate
 * between the primary threads to decide who takes up the responsibility.
 *
 * This is what we do:
 * - The primary thread from each subcore tries to set the resync
 *   required bit[63] of paca->sibling_subcore_state->flags.
 * - The first primary thread that is able to set the flag takes the
 *   responsibility for the TB resync (let us call it the thread leader).
 * - All other threads which are in the host will call
 *   wait_for_subcore_guest_exit() and wait for in_guest[0-3] from
 *   paca->sibling_subcore_state to get cleared.
 * - All the primary threads will clear their subcore status from the
 *   subcore state in_guest[] array respectively.
 * - Once all primary threads have cleared in_guest[0-3], all of them
 *   will invoke the opal hmi handler.
 * - Now all threads will wait for the TB resync to complete by invoking
 *   wait_for_tb_resync(), except the thread leader.
 * - The thread leader will do a TB resync by invoking the
 *   opal_resync_timebase() call, and then it will clear the resync
 *   required bit.
 * - All other threads will now come out of the resync wait loop and
 *   proceed with their individual execution.
 * - On return of this function, the primary thread will signal all
 *   secondary threads to proceed.
 * - All secondary threads will eventually call the opal hmi handler on
 *   their exit path.
 *
 * Returns 1 if the timebase offset should be applied, 0 if not.
 */

long kvmppc_realmode_hmi_handler(void)
{
	bool resync_req;

	local_paca->hmi_irqs++;

	if (hmi_handle_debugtrig(NULL) >= 0)
		return 1;

	/*
	 * By now the primary thread has already completed the guest->host
	 * partition switch but hasn't signaled the secondaries yet.
	 * All the secondary threads on this subcore are waiting
	 * for the primary thread to signal them to go ahead.
	 *
	 * Threads from a subcore that isn't in the guest will all
	 * wait until all other subcores on this core exit the guest.
	 *
	 * Now set the resync required bit. If you are the first to
	 * set this bit then kvmppc_tb_resync_required() will
	 * return true. For all other subcores
	 * kvmppc_tb_resync_required() will return false.
	 *
	 * If resync_req == true, then this thread is responsible for
	 * initiating the TB resync after the hmi handler has completed.
	 * All other threads on this core will wait until this thread
	 * clears the resync required bit flag.
	 */
	resync_req = kvmppc_tb_resync_required();

	/* Reset the subcore status to indicate it has exited guest */
	kvmppc_subcore_exit_guest();

	/*
	 * Wait for other subcores on this core to exit the guest.
	 * All the primary threads and threads from subcores that are
	 * not in the guest will wait here until all subcores are out
	 * of guest context.
	 */
	wait_for_subcore_guest_exit();

	/*
	 * At this point we are sure that primary threads from each
	 * subcore on this core have completed the guest->host partition
	 * switch. Now it is safe to call the HMI handler.
	 */
	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(NULL);

	/*
	 * Check if this thread is responsible for resyncing the TB.
	 * All other threads will wait until this thread completes the
	 * TB resync.
	 */
	if (resync_req) {
		opal_resync_timebase();
		/* Reset the TB resync req bit */
		kvmppc_tb_resync_done();
	} else {
		wait_for_tb_resync();
	}

	/*
	 * Reset tb_offset_applied so the guest exit code won't try
	 * to subtract the previous timebase offset from the timebase.
	 */
	if (local_paca->kvm_hstate.kvm_vcore)
		local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;

	return 0;
}