// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

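/*
 * SBI_EXT_HSM_HART_START: power on the target VCPU identified by a0.
 * The start address (a1) and the opaque value handed to the started
 * hart (a2) are staged in the target's reset context under
 * reset_cntx_lock, a reset request is queued, and the target is then
 * marked powered on. All of this happens under the target's
 * mp_state_lock so it cannot race with a concurrent stop or start.
 */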
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *reset_cntx;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_vcpu *target_vcpu;
	unsigned long target_vcpuid = cp->a0;
	int ret = 0;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;

	spin_lock(&target_vcpu->arch.mp_state_lock);

	if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
		ret = SBI_ERR_ALREADY_AVAILABLE;
		goto out;
	}

	spin_lock(&target_vcpu->arch.reset_cntx_lock);
	reset_cntx = &target_vcpu->arch.guest_reset_context;
	/* start address */
	reset_cntx->sepc = cp->a1;
	/* target vcpu id to start */
	reset_cntx->a0 = target_vcpuid;
	/* private data passed from kernel */
	reset_cntx->a1 = cp->a2;
	spin_unlock(&target_vcpu->arch.reset_cntx_lock);

	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);

	__kvm_riscv_vcpu_power_on(target_vcpu);

out:
	spin_unlock(&target_vcpu->arch.mp_state_lock);

	return ret;
}

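/*
 * SBI_EXT_HSM_HART_STOP: power off the calling VCPU. Stopping a hart
 * that is already stopped fails with SBI_ERR_FAILURE.
 */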
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	if (kvm_riscv_vcpu_stopped(vcpu)) {
		ret = SBI_ERR_FAILURE;
		goto out;
	}

	__kvm_riscv_vcpu_power_off(vcpu);

out:
	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

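/*
 * SBI_EXT_HSM_HART_STATUS: report the HSM state of the VCPU identified
 * by a0. A VCPU that is not stopped but is blocked (e.g. waiting in
 * WFI) is reported as SUSPENDED rather than STARTED.
 */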
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long target_vcpuid = cp->a0;
	struct kvm_vcpu *target_vcpu;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;
	if (kvm_riscv_vcpu_stopped(target_vcpu))
		return SBI_HSM_STATE_STOPPED;
	else if (target_vcpu->stat.generic.blocking)
		return SBI_HSM_STATE_SUSPENDED;
	else
		return SBI_HSM_STATE_STARTED;
}

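/*
 * Dispatch for the SBI HSM extension (a6 selects the function). Note
 * that HART_STATUS reports its state through retdata->out_val and
 * returns early, while the other functions only set retdata->err_val;
 * the handler itself always returns 0, with failures reported to the
 * guest via the SBI error code in retdata->err_val.
 */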
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_HSM_HART_START:
		ret = kvm_sbi_hsm_vcpu_start(vcpu);
		break;
	case SBI_EXT_HSM_HART_STOP:
		ret = kvm_sbi_hsm_vcpu_stop(vcpu);
		break;
	case SBI_EXT_HSM_HART_STATUS:
		ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
		if (ret >= 0) {
			retdata->out_val = ret;
			retdata->err_val = 0;
		}
		return 0;
	case SBI_EXT_HSM_HART_SUSPEND:
		switch (cp->a0) {
		case SBI_HSM_SUSPEND_RET_DEFAULT:
			kvm_riscv_vcpu_wfi(vcpu);
			break;
		case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
			ret = SBI_ERR_NOT_SUPPORTED;
			break;
		default:
			ret = SBI_ERR_INVALID_PARAM;
		}
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
	}

	retdata->err_val = ret;

	return 0;
}

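/* The HSM extension occupies the single extension ID SBI_EXT_HSM. */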
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
	.extid_start = SBI_EXT_HSM,
	.extid_end = SBI_EXT_HSM,
	.handler = kvm_sbi_ext_hsm_handler,
};