1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * VGIC: KVM DEVICE API
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 */
8 #include <linux/kvm_host.h>
9 #include <kvm/arm_vgic.h>
10 #include <linux/uaccess.h>
11 #include <asm/kvm_mmu.h>
12 #include <asm/cputype.h>
13 #include "vgic.h"
14
15 /* common helpers */
16
/*
 * Validate a guest-physical MMIO range before assigning it to a vgic
 * resource.  @ioaddr is the currently configured base (must still be
 * undefined), @addr/@size describe the requested range and @alignment
 * the required alignment for both.
 *
 * Return: 0 when the range is acceptable, -EEXIST if the base was already
 * set, -EINVAL on misalignment or wrap-around, -E2BIG if the range does
 * not fit in the guest's IPA space.
 */
int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size)
{
	phys_addr_t end = addr + size;

	/* The base address may only be assigned once. */
	if (!IS_VGIC_ADDR_UNDEF(ioaddr))
		return -EEXIST;

	/* Base and size must both honour the required alignment. */
	if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
		return -EINVAL;

	/* Reject ranges wrapping around the top of the address space. */
	if (end < addr)
		return -EINVAL;

	/* The whole range must fall inside the guest's IPA space. */
	if ((addr & ~kvm_phys_mask(&kvm->arch.mmu)) ||
	    end > kvm_phys_size(&kvm->arch.mmu))
		return -E2BIG;

	return 0;
}
36
vgic_check_type(struct kvm * kvm,int type_needed)37 static int vgic_check_type(struct kvm *kvm, int type_needed)
38 {
39 if (kvm->arch.vgic.vgic_model != type_needed)
40 return -ENODEV;
41 else
42 return 0;
43 }
44
/**
 * kvm_set_legacy_vgic_v2_addr - handle the legacy KVM_ARM_SET_DEVICE_ADDR
 * ioctl for a GICv2 guest
 * @kvm: pointer to the vm struct
 * @dev_addr: userspace-supplied device id (selects dist vs cpu interface)
 *            and guest-physical base address
 *
 * Validates the requested base (model match, alignment, single assignment,
 * IPA-space fit) and, on success, records it in the vgic distributor state.
 *
 * Return: 0 on success, -ENODEV for an unknown address type or a non-GICv2
 * VM, or the error from vgic_check_iorange().
 */
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int r;

	/* config_lock serializes updates of the vgic base addresses. */
	mutex_lock(&kvm->arch.config_lock);
	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		/* Only valid on a GICv2 model; then range-check the base. */
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_DIST_SIZE);
		if (!r)
			vgic->vgic_dist_base = dev_addr->addr;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_CPU_SIZE);
		if (!r)
			vgic->vgic_cpu_base = dev_addr->addr;
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->arch.config_lock);

	return r;
}
76
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @attr: pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 *
 * Return: 0 on success; -ENODEV if the attribute does not match the vgic
 * model, -EFAULT on a failed user access, -ENOENT for an unknown
 * redistributor region index, or the error from the range checks.
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
	u64 __user *uaddr = (u64 __user *)attr->addr;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment, size;
	u64 undef_value = VGIC_ADDR_UNDEF;
	u64 addr;
	int r;

	/* Reading a redistributor region addr implies getting the index */
	if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
		if (get_user(addr, uaddr))
			return -EFAULT;

	/*
	 * Since we can't hold config_lock while registering the redistributor
	 * iodevs, take the slots_lock immediately.
	 */
	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_DIST_SIZE;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_CPU_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		size = KVM_VGIC_V3_DIST_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			/* Registers the iodevs itself, hence the early exit. */
			r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
			goto out;
		}
		/* Reads report the base of the first (legacy) rdist region. */
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		/* The userspace value packs index, flags, count and base. */
		index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
			u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

			/* No flags are defined yet; count must be non-zero. */
			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		/* Re-pack index, base and count for the read-back. */
		addr = index;
		addr |= rdreg->base;
		addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	/* Common path for dist/cpuif bases: check and update under config_lock. */
	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
			*addr_ptr = addr;
	} else {
		addr = *addr_ptr;
	}
	mutex_unlock(&kvm->arch.config_lock);

out:
	mutex_unlock(&kvm->slots_lock);

	/* Reads copy the (possibly undefined) address back to userspace. */
	if (!r && !write)
		r = put_user(addr, uaddr);

	return r;
}
208
/*
 * vgic_set_common_attr - handle set_attr for the attribute groups shared by
 * the GICv2 and GICv3 userspace devices (addresses, number of IRQs, control).
 *
 * Returns 0 on success or a negative errno; -ENXIO for unhandled
 * group/attribute combinations.
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, true);
		/* The device API reports unknown attributes as -ENXIO. */
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->arch.config_lock);

		/*
		 * Either userspace has already configured NR_IRQS or
		 * the vgic has already been initialized and vgic_init()
		 * supplied a default amount of SPIs.
		 */
		if (dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->arch.config_lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			return r;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			/*
			 * OK, this one isn't common at all, but we
			 * want to handle all control group attributes
			 * in a single place.
			 */
			if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
				return -ENXIO;
			/* Lock order: kvm->lock -> all vcpus -> config_lock. */
			mutex_lock(&dev->kvm->lock);

			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}

			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_v3_save_pending_tables(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
289
vgic_get_common_attr(struct kvm_device * dev,struct kvm_device_attr * attr)290 static int vgic_get_common_attr(struct kvm_device *dev,
291 struct kvm_device_attr *attr)
292 {
293 int r = -ENXIO;
294
295 switch (attr->group) {
296 case KVM_DEV_ARM_VGIC_GRP_ADDR:
297 r = kvm_vgic_addr(dev->kvm, attr, false);
298 return (r == -ENODEV) ? -ENXIO : r;
299 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
300 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
301
302 r = put_user(dev->kvm->arch.vgic.nr_spis +
303 VGIC_NR_PRIVATE_IRQS, uaddr);
304 break;
305 }
306 }
307
308 return r;
309 }
310
/* kvm_device_ops::create callback - forward to the common vgic constructor. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
315
/*
 * kvm_device_ops::destroy callback - only frees the device wrapper; the
 * vgic state itself lives in struct kvm and is torn down with the VM.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
320
kvm_register_vgic_device(unsigned long type)321 int kvm_register_vgic_device(unsigned long type)
322 {
323 int ret = -ENODEV;
324
325 switch (type) {
326 case KVM_DEV_TYPE_ARM_VGIC_V2:
327 ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
328 KVM_DEV_TYPE_ARM_VGIC_V2);
329 break;
330 case KVM_DEV_TYPE_ARM_VGIC_V3:
331 ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
332 KVM_DEV_TYPE_ARM_VGIC_V3);
333
334 if (ret)
335 break;
336 ret = kvm_vgic_register_its_device();
337 break;
338 }
339
340 return ret;
341 }
342
vgic_v2_parse_attr(struct kvm_device * dev,struct kvm_device_attr * attr,struct vgic_reg_attr * reg_attr)343 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
344 struct vgic_reg_attr *reg_attr)
345 {
346 int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
347
348 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
349 reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
350 if (!reg_attr->vcpu)
351 return -EINVAL;
352
353 return 0;
354 }
355
/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success; -EFAULT on a failed user access, -EBUSY if the
 * vcpus can't all be locked, or the error from init/register access.
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 val;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	/* Fetch the value to write before taking any locks. */
	if (is_write)
		if (get_user(val, uaddr))
			return -EFAULT;

	/* Lock order: kvm->lock -> all vcpus -> config_lock. */
	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* Userspace access forces initialization of the vgic. */
	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the read value back after all locks are dropped. */
	if (!ret && !is_write)
		ret = put_user(val, uaddr);

	return ret;
}
420
vgic_v2_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)421 static int vgic_v2_set_attr(struct kvm_device *dev,
422 struct kvm_device_attr *attr)
423 {
424 switch (attr->group) {
425 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
426 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
427 return vgic_v2_attr_regs_access(dev, attr, true);
428 default:
429 return vgic_set_common_attr(dev, attr);
430 }
431 }
432
vgic_v2_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)433 static int vgic_v2_get_attr(struct kvm_device *dev,
434 struct kvm_device_attr *attr)
435 {
436 switch (attr->group) {
437 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
438 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
439 return vgic_v2_attr_regs_access(dev, attr, false);
440 default:
441 return vgic_get_common_attr(dev, attr);
442 }
443 }
444
vgic_v2_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)445 static int vgic_v2_has_attr(struct kvm_device *dev,
446 struct kvm_device_attr *attr)
447 {
448 switch (attr->group) {
449 case KVM_DEV_ARM_VGIC_GRP_ADDR:
450 switch (attr->attr) {
451 case KVM_VGIC_V2_ADDR_TYPE_DIST:
452 case KVM_VGIC_V2_ADDR_TYPE_CPU:
453 return 0;
454 }
455 break;
456 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
457 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
458 return vgic_v2_has_attr_regs(dev, attr);
459 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
460 return 0;
461 case KVM_DEV_ARM_VGIC_GRP_CTRL:
462 switch (attr->attr) {
463 case KVM_DEV_ARM_VGIC_CTRL_INIT:
464 return 0;
465 }
466 }
467 return -ENXIO;
468 }
469
/* Device ops backing the KVM_DEV_TYPE_ARM_VGIC_V2 userspace device. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
478
/*
 * Decode a GICv3 register attribute into a target vcpu and a register
 * offset.  The vcpu is identified by the affinity (MPIDR) bits encoded
 * in the attribute, except for distributor registers which are not
 * banked per CPU.  Returns -EINVAL when no matching vcpu exists.
 */
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	unsigned long vgic_mpidr, mpidr_reg;

	/*
	 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
	 * attr might not hold MPIDR. Hence assume vcpu0.
	 */
	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
			      KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

		/* Expand the packed affinity fields into MPIDR layout. */
		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
	} else {
		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
	}

	if (!reg_attr->vcpu)
		return -EINVAL;

	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}
505
/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success; -EFAULT on a failed user access, -EBUSY if the
 * vgic is not initialized or the vcpus can't all be locked, -EINVAL for
 * unsupported groups, or the error from the register accessor.
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	bool uaccess;
	u32 val;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	/* Decide whether this function performs the userspace copies. */
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		/* Sysregs uaccess is performed by the sysreg handling code */
		uaccess = false;
		break;
	default:
		uaccess = true;
	}

	/* Fetch the value to write before taking any locks. */
	if (uaccess && is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		if (get_user(val, uaddr))
			return -EFAULT;
	}

	/* Lock order: kvm->lock -> all vcpus -> config_lock. */
	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* Unlike v2, v3 requires the vgic to already be initialized. */
	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
		break;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, &val);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the read value back after all locks are dropped. */
	if (!ret && uaccess && !is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		ret = put_user(val, uaddr);
	}

	return ret;
}
602
vgic_v3_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)603 static int vgic_v3_set_attr(struct kvm_device *dev,
604 struct kvm_device_attr *attr)
605 {
606 switch (attr->group) {
607 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
608 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
609 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
610 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
611 return vgic_v3_attr_regs_access(dev, attr, true);
612 default:
613 return vgic_set_common_attr(dev, attr);
614 }
615 }
616
vgic_v3_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)617 static int vgic_v3_get_attr(struct kvm_device *dev,
618 struct kvm_device_attr *attr)
619 {
620 switch (attr->group) {
621 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
622 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
623 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
624 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
625 return vgic_v3_attr_regs_access(dev, attr, false);
626 default:
627 return vgic_get_common_attr(dev, attr);
628 }
629 }
630
vgic_v3_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)631 static int vgic_v3_has_attr(struct kvm_device *dev,
632 struct kvm_device_attr *attr)
633 {
634 switch (attr->group) {
635 case KVM_DEV_ARM_VGIC_GRP_ADDR:
636 switch (attr->attr) {
637 case KVM_VGIC_V3_ADDR_TYPE_DIST:
638 case KVM_VGIC_V3_ADDR_TYPE_REDIST:
639 case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
640 return 0;
641 }
642 break;
643 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
644 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
645 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
646 return vgic_v3_has_attr_regs(dev, attr);
647 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
648 return 0;
649 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
650 if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
651 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
652 VGIC_LEVEL_INFO_LINE_LEVEL)
653 return 0;
654 break;
655 }
656 case KVM_DEV_ARM_VGIC_GRP_CTRL:
657 switch (attr->attr) {
658 case KVM_DEV_ARM_VGIC_CTRL_INIT:
659 return 0;
660 case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
661 return 0;
662 }
663 }
664 return -ENXIO;
665 }
666
/* Device ops backing the KVM_DEV_TYPE_ARM_VGIC_V3 userspace device. */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
675