// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

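/*
 * Marshal @data into @buf as a @len-byte quantity. The union picks a
 * field of exactly the access width, so the memcpy() below stores only
 * the bytes the guest access actually produced, in host byte order.
 */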
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte	= data;
		datap		= &tmp.byte;
		break;
	case 2:
		tmp.hword	= data;
		datap		= &tmp.hword;
		break;
	case 4:
		tmp.word	= data;
		datap		= &tmp.word;
		break;
	case 8:
		tmp.dword	= data;
		datap		= &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}

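/*
 * The converse of kvm_mmio_write_buf(): zero-extend the first @len
 * bytes of @buf into an unsigned long. Going through memcpy() and a
 * correctly sized union field avoids unaligned loads from @buf.
 */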
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/* Detect an already handled MMIO return */
	if (unlikely(!vcpu->mmio_needed))
		return 1;

	vcpu->mmio_needed = 0;

	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
		struct kvm_run *run = vcpu->run;

		len = kvm_vcpu_dabt_get_as(vcpu);
		data = kvm_mmio_read_buf(run->mmio.data, len);

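		/*
		 * Sign-extending load: (data ^ mask) - mask is the usual
		 * branchless idiom for replicating the top bit of a
		 * (len * 8)-bit value across an unsigned long.
		 */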
		if (kvm_vcpu_dabt_issext(vcpu) &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

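		/* A load into a 32-bit (Wn) register zeroes the upper bits. */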
		if (!kvm_vcpu_dabt_issf(vcpu))
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_incr_pc(vcpu);

	return 1;
}

int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	struct kvm_run *run = vcpu->run;
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
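	/* Staging buffer, sized for the widest supported access. */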
	u8 data_buf[8];

	/*
	 * No valid syndrome? Ask userspace for help if it has
	 * volunteered to do so, and bail out otherwise.
	 *
	 * In the protected VM case, there isn't much userspace can do
	 * though, so directly deliver an exception to the guest.
	 */
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
				    kvm_vcpu_get_hfar(vcpu), fault_ipa);

		if (vcpu_is_protected(vcpu)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			return 1;
		}

		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			     &vcpu->kvm->arch.flags)) {
			run->exit_reason = KVM_EXIT_ARM_NISV;
			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}

		return -ENOSYS;
	}

	/*
	 * Prepare the MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then check whether some in-kernel emulation claims
	 * the access, otherwise let user space do its magic.
	 */
	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	len = kvm_vcpu_dabt_get_as(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Now prepare kvm_run for the potential return to userland. */
	run->mmio.is_write	= is_write;
	run->mmio.phys_addr	= fault_ipa;
	run->mmio.len		= len;
	vcpu->mmio_needed	= 1;

	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu);
		return 1;
	}

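	/* No in-kernel device claimed the access; let userspace emulate it. */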
	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;
	return 0;
}