// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
#include "trace.h"

unsigned long vpid_mask;
struct kvm_world_switch *kvm_loongarch_ops;
static int gcsr_flag[CSR_MAX_NUMS];
static struct kvm_context __percpu *vmcs;

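/* Return the flag for @csr, or INVALID_GCSR if the index is out of range. */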
int get_gcsr_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		return gcsr_flag[csr];

	return INVALID_GCSR;
}

static inline void set_gcsr_sw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= SW_GCSR;
}

static inline void set_gcsr_hw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= HW_GCSR;
}

/*
 * The default value of gcsr_flag[CSR] is 0, and this function sets the
 * flag to 1 (SW_GCSR) or 2 (HW_GCSR) depending on whether the GCSR is
 * emulated in software or backed by hardware. The flag is consumed by
 * the get/set_gcsr helpers: a HW_GCSR register is accessed with the
 * gcsrrd/gcsrwr instructions, while a SW_GCSR register is emulated
 * through the software CSR image.
 */
static void kvm_init_gcsr_flag(void)
{
	set_gcsr_hw_flag(LOONGARCH_CSR_CRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_EUEN);
	set_gcsr_hw_flag(LOONGARCH_CSR_MISC);
	set_gcsr_hw_flag(LOONGARCH_CSR_ECFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT);
	set_gcsr_hw_flag(LOONGARCH_CSR_ERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADI);
	set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_ASID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDL);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDH);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1);
	set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE);
	set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_CPUID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS0);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS1);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS2);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS4);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS5);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS6);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS7);
	set_gcsr_hw_flag(LOONGARCH_CSR_TMID);
	set_gcsr_hw_flag(LOONGARCH_CSR_TCFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_TVAL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR);
	set_gcsr_hw_flag(LOONGARCH_CSR_CNTC);
	set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3);

	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE);
	set_gcsr_sw_flag(LOONGARCH_CSR_CTAG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE);

	set_gcsr_sw_flag(LOONGARCH_CSR_FWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_FWPS);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPS);

	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
}

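/*
 * Allocate the next VPID for @vcpu on @cpu. The low bits of vpid_cache
 * hold the VPID itself and the high bits act as a version number; when
 * the low bits wrap around, a new version begins and all guest TLB
 * entries are flushed so that entries tagged with VPIDs from the
 * previous cycle cannot be reused.
 */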
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long vpid;
	struct kvm_context *context;

	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	vpid = context->vpid_cache + 1;
	if (!(vpid & vpid_mask)) {
		/* finish round of vpid loop */
		if (unlikely(!vpid))
			vpid = vpid_mask + 1;

		++vpid; /* vpid 0 reserved for root */

		/* start new vpid cycle */
		kvm_flush_tlb_all();
	}

	context->vpid_cache = vpid;
	vcpu->arch.vpid = vpid;
}

void kvm_check_vpid(struct kvm_vcpu *vcpu)
{
	int cpu;
	bool migrated;
	unsigned long ver, old, vpid;
	struct kvm_context *context;

	cpu = smp_processor_id();
	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the vCPU's guest TLB state on this CPU may be stale.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	migrated = (vcpu->cpu != cpu);

	/*
	 * Check if our vpid is of an older version
	 *
	 * We also discard the stored vpid if we've executed on
	 * another CPU, as the guest mappings may have changed without
	 * hypervisor knowledge.
	 */
	ver = vcpu->arch.vpid & ~vpid_mask;
	old = context->vpid_cache & ~vpid_mask;
	if (migrated || (ver != old)) {
		kvm_update_vpid(vcpu, cpu);
		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
		vcpu->cpu = cpu;
		/* A brand-new VPID has no stale TLB entries, so any pending GPA flush can be dropped */
		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
	}

	/* Restore GSTAT(0x50).vpid */
	vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT;
	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}

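/* Every VM shares the module-wide per-CPU context used for VPID tracking. */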
void kvm_init_vmcs(struct kvm *kvm)
{
	kvm->arch.vmcs = vmcs;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

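/*
 * Per-CPU enable path: reset the guest-mode control CSRs, then pick a
 * GCFG under which the host traps what it must mediate while letting
 * the guest drive the rest directly.
 */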
int kvm_arch_enable_virtualization_cpu(void)
{
	unsigned long env, gcfg = 0;

	env = read_csr_gcfg();

	/* First init gcfg, gstat, gintc, gtlbc. All guests use the same config */
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * GCI=2:       Trap on init or unimplemented cache instructions.
	 * TORU=0:      Trap on Root Unimplemented.
	 * CACTRL=1:    Root controls the cache.
	 * TOP=0:       Trap on Privilege.
	 * TOE=0:       Trap on Exception.
	 * TIT=0:       Trap on Timer.
	 */
	if (env & CSR_GCFG_GCIP_ALL)
		gcfg |= CSR_GCFG_GCI_SECURE;
	if (env & CSR_GCFG_MATC_ROOT)
		gcfg |= CSR_GCFG_MATC_ROOT;

	write_csr_gcfg(gcfg);

	kvm_flush_tlb_all();

	/* Enable using TGID */
	set_csr_gtlbc(CSR_GTLBC_USETGID);
	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

	return 0;
}

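/* Per-CPU disable path: clear the guest-mode CSRs and flush guest TLB state. */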
void kvm_arch_disable_virtualization_cpu(void)
{
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/* Flush any remaining guest TLB entries */
	kvm_flush_tlb_all();
}

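/*
 * One-time module setup: allocate the per-CPU VPID contexts, copy the
 * world-switch entry code into a DMW-covered area, derive the VPID mask
 * from hardware, and build the GCSR software/hardware flag table.
 */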
static int kvm_loongarch_env_init(void)
{
	int cpu, order;
	void *addr;
	struct kvm_context *context;

	vmcs = alloc_percpu(struct kvm_context);
	if (!vmcs) {
		pr_err("kvm: failed to allocate percpu kvm_context\n");
		return -ENOMEM;
	}

	kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
	if (!kvm_loongarch_ops) {
		free_percpu(vmcs);
		vmcs = NULL;
		return -ENOMEM;
	}

	/*
	 * The PGD register is shared between the root kernel and the kvm
	 * hypervisor, so the world switch entry should live in a DMW area
	 * rather than a TLB-mapped area, to avoid re-entering the page
	 * fault handler during the switch.
	 *
	 * In the future, if hardware page table walking is supported, we
	 * won't need to copy the world switch code to a DMW area.
	 */
	order = get_order(kvm_exception_size + kvm_enter_guest_size);
	addr = (void *)__get_free_pages(GFP_KERNEL, order);
	if (!addr) {
		free_percpu(vmcs);
		vmcs = NULL;
		kfree(kvm_loongarch_ops);
		kvm_loongarch_ops = NULL;
		return -ENOMEM;
	}

	memcpy(addr, kvm_exc_entry, kvm_exception_size);
	memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
	flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
	kvm_loongarch_ops->exc_entry = addr;
	kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
	kvm_loongarch_ops->page_order = order;

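	/*
	 * GSTAT.GIDBIT reports how many GID bits the hardware implements;
	 * build a VPID mask of that width (zero means no GID support).
	 */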
	vpid_mask = read_csr_gstat();
	vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
	if (vpid_mask)
		vpid_mask = GENMASK(vpid_mask - 1, 0);

	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vmcs, cpu);
		context->vpid_cache = vpid_mask + 1;
		context->last_vcpu = NULL;
	}

	kvm_init_gcsr_flag();

	return 0;
}

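/* Undo kvm_loongarch_env_init(); tolerates partially initialized state. */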
static void kvm_loongarch_env_exit(void)
{
	unsigned long addr;

	if (vmcs)
		free_percpu(vmcs);

	if (kvm_loongarch_ops) {
		if (kvm_loongarch_ops->exc_entry) {
			addr = (unsigned long)kvm_loongarch_ops->exc_entry;
			free_pages(addr, kvm_loongarch_ops->page_order);
		}
		kfree(kvm_loongarch_ops);
	}
}

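/* Module entry point: require LVZ, set up shared state, then register with generic KVM. */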
static int kvm_loongarch_init(void)
{
	int r;

	if (!cpu_has_lvz) {
		kvm_info("Hardware virtualization not available\n");
		return -ENODEV;
	}
	r = kvm_loongarch_env_init();
	if (r)
		return r;

	return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}

module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);

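/*
 * When built as a module, export a CPU feature table so that userspace
 * (e.g. udev) can autoload kvm on CPUs advertising the LVZ feature.
 */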
#ifdef MODULE
static const struct cpu_feature kvm_feature[] = {
	{ .feature = cpu_feature(LOONGARCH_LVZ) },
	{},
};
MODULE_DEVICE_TABLE(cpu, kvm_feature);
#endif