// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>

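/*
 * VM-wide statistics exposed through the KVM binary stats interface.
 * RISC-V only provides the generic VM stats, so the descriptor table
 * must stay in sync with struct kvm_vm_stat (checked below).
 */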
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
	      sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

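/*
 * VM creation hook: allocate the g-stage (guest-stage) page table,
 * initialize the VMID allocator, the in-kernel AIA state, and the
 * guest timer. A VMID init failure must undo the page-table allocation.
 */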
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int r;

	r = kvm_riscv_gstage_alloc_pgd(kvm);
	if (r)
		return r;

	r = kvm_riscv_gstage_vmid_init(kvm);
	if (r) {
		kvm_riscv_gstage_free_pgd(kvm);
		return r;
	}

	kvm_riscv_aia_init_vm(kvm);

	kvm_riscv_guest_timer_init(kvm);

	return 0;
}

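/* VM teardown hook: destroy all vCPUs before tearing down AIA state. */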
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);

	kvm_riscv_aia_destroy_vm(kvm);
}

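/*
 * KVM_IRQ_LINE handler: forward the requested line state to the
 * in-kernel AIA irqchip, or fail with -ENXIO if the VM was created
 * without an in-kernel irqchip.
 */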
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
}

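/*
 * Inject an MSI described by a routing entry into the AIA. MSIs are
 * edge events, so a "low" level carries no interrupt and is rejected.
 */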
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	struct kvm_msi msi;

	if (!level)
		return -1;

	msi.address_lo = e->msi.address_lo;
	msi.address_hi = e->msi.address_hi;
	msi.data = e->msi.data;
	msi.flags = e->msi.flags;
	msi.devid = e->msi.devid;

	return kvm_riscv_aia_inject_msi(kvm, &msi);
}

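/* Set the state of the AIA pin that this IRQCHIP routing entry targets. */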
static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
			     struct kvm *kvm, int irq_source_id,
			     int level, bool line_status)
{
	return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
}

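/*
 * Install an identity GSI routing table: GSI n maps to pin n of
 * irqchip 0. kvm_set_irq_routing() makes its own copy of the entries,
 * so the temporary array is freed right away.
 */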
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
{
	struct kvm_irq_routing_entry *ents;
	int i, rc;

	ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
	if (!ents)
		return -ENOMEM;

	for (i = 0; i < lines; i++) {
		ents[i].gsi = i;
		ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
		ents[i].u.irqchip.irqchip = 0;
		ents[i].u.irqchip.pin = i;
	}
	rc = kvm_set_irq_routing(kvm, ents, lines, 0);
	kfree(ents);

	return rc;
}

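/* GSI routing can only be changed once an in-kernel irqchip exists. */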
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

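/*
 * Translate one userspace routing entry into its in-kernel form,
 * bounds-checking irqchip/pin values and rejecting unknown types.
 */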
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = kvm_riscv_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
		    (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
			goto out;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		e->msi.flags = ue->flags;
		e->msi.devid = ue->u.msi.devid;
		break;
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}

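/*
 * Atomic-context injection path (e.g. for irqfd): MSI and IRQCHIP
 * routes can be delivered without sleeping; anything else returns
 * -EWOULDBLOCK so the caller retries from non-atomic context.
 */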
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	if (!level)
		return -EWOULDBLOCK;

	switch (e->type) {
	case KVM_IRQ_ROUTING_MSI:
		return kvm_set_msi(e, kvm, irq_source_id, level, line_status);

	case KVM_IRQ_ROUTING_IRQCHIP:
		return kvm_riscv_set_irq(e, kvm, irq_source_id,
					 level, line_status);
	}

	return -EWOULDBLOCK;
}

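/* Report whether this VM uses an in-kernel irqchip (AIA) model. */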
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

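/*
 * KVM_CHECK_EXTENSION at VM scope: advertise the generic capabilities
 * supported on RISC-V plus VM limits such as vCPU count, memslot
 * count, and guest physical address width.
 */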
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = kvm_riscv_aia_available();
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_VM_GPA_BITS:
		r = kvm_riscv_gstage_gpa_bits();
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

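/* No RISC-V specific VM ioctls are handled; everything is -EINVAL. */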
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}