Lines Matching +full:irq +full:- +full:device

1 // SPDX-License-Identifier: GPL-2.0-only
17 #include <linux/irqchip/arm-gic-v3.h>
24 #include "vgic-mmio.h"
31 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
44 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_add_lpi()
45 struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq; in vgic_add_lpi() local
50 if (irq) in vgic_add_lpi()
51 return irq; in vgic_add_lpi()
53 irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT); in vgic_add_lpi()
54 if (!irq) in vgic_add_lpi()
55 return ERR_PTR(-ENOMEM); in vgic_add_lpi()
57 ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT); in vgic_add_lpi()
59 kfree(irq); in vgic_add_lpi()
63 INIT_LIST_HEAD(&irq->ap_list); in vgic_add_lpi()
64 raw_spin_lock_init(&irq->irq_lock); in vgic_add_lpi()
66 irq->config = VGIC_CONFIG_EDGE; in vgic_add_lpi()
67 kref_init(&irq->refcount); in vgic_add_lpi()
68 irq->intid = intid; in vgic_add_lpi()
69 irq->target_vcpu = vcpu; in vgic_add_lpi()
70 irq->group = 1; in vgic_add_lpi()
72 xa_lock_irqsave(&dist->lpi_xa, flags); in vgic_add_lpi()
78 oldirq = xa_load(&dist->lpi_xa, intid); in vgic_add_lpi()
81 kfree(irq); in vgic_add_lpi()
82 irq = oldirq; in vgic_add_lpi()
87 ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0)); in vgic_add_lpi()
89 xa_release(&dist->lpi_xa, intid); in vgic_add_lpi()
90 kfree(irq); in vgic_add_lpi()
94 xa_unlock_irqrestore(&dist->lpi_xa, flags); in vgic_add_lpi()
107 ret = update_lpi_config(kvm, irq, NULL, false); in vgic_add_lpi()
109 vgic_put_irq(kvm, irq); in vgic_add_lpi()
113 ret = vgic_v3_lpi_sync_pending_status(kvm, irq); in vgic_add_lpi()
115 vgic_put_irq(kvm, irq); in vgic_add_lpi()
119 return irq; in vgic_add_lpi()
142 ((coll)->target_addr != COLLECTION_NOT_MAPPED))
147 struct vgic_irq *irq; member
153 * struct vgic_its_abi - ITS abi ops and settings
155 * @dte_esz: device table entry size
190 return &its_table_abi_versions[its->abi_rev]; in vgic_its_get_abi()
197 its->abi_rev = rev; in vgic_its_set_abi()
199 return abi->commit(its); in vgic_its_set_abi()
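
A note for orientation: the two accessors above index a static table of per-ABI-revision operations. A minimal sketch of what one table entry plausibly holds, inferred from the abi->cte_esz/dte_esz/ite_esz, abi->save_tables, abi->restore_tables and abi->commit uses elsewhere in this listing (the struct name and field ordering here are assumptions, not a verbatim copy):

struct vgic_its;	/* opaque here; defined in the vgic headers */

/* Sketch only: fields inferred from their uses elsewhere in this file. */
struct vgic_its_abi_sketch {
	int cte_esz;	/* collection table entry size, in bytes */
	int dte_esz;	/* device table entry size, in bytes */
	int ite_esz;	/* interrupt translation entry size, in bytes */
	int (*save_tables)(struct vgic_its *its);	/* dump state to guest RAM */
	int (*restore_tables)(struct vgic_its *its);	/* rebuild state from guest RAM */
	int (*commit)(struct vgic_its *its);	/* fold entry sizes into the BASERs */
};
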
203 * Finds and returns a device in the device table for an ITS.
208 struct its_device *device; in find_its_device() local
210 list_for_each_entry(device, &its->device_list, dev_list) in find_its_device()
211 if (device_id == device->device_id) in find_its_device()
212 return device; in find_its_device()
219 * Device ID/Event ID pair on an ITS.
225 struct its_device *device; in find_ite() local
228 device = find_its_device(its, device_id); in find_ite()
229 if (device == NULL) in find_ite()
232 list_for_each_entry(ite, &device->itt_head, ite_list) in find_ite()
233 if (ite->event_id == event_id) in find_ite()
241 list_for_each_entry(dev, &(its)->device_list, dev_list) \
242 list_for_each_entry(ite, &(dev)->itt_head, ite_list)
247 #define VITS_MAX_EVENTID (BIT(VITS_TYPER_IDBITS) - 1)
249 #define VITS_MAX_DEVID (BIT(VITS_TYPER_DEVBITS) - 1)
250 #define VITS_DTE_MAX_DEVID_OFFSET (BIT(14) - 1)
251 #define VITS_ITE_MAX_EVENTID_OFFSET (BIT(16) - 1)
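
As a quick worked check of these bounds, here is a standalone computation assuming 16-bit device and event ID widths (the concrete VITS_TYPER_DEVBITS/VITS_TYPER_IDBITS values do not appear in this listing, so treat the widths as assumptions):

#include <stdint.h>
#include <stdio.h>

#define VITS_TYPER_IDBITS  16	/* assumed event-ID width */
#define VITS_TYPER_DEVBITS 16	/* assumed device-ID width */
#define BIT(n) (1UL << (n))

int main(void)
{
	/* With 16-bit IDs, both spaces top out at 65535. */
	printf("VITS_MAX_EVENTID = %lu\n", BIT(VITS_TYPER_IDBITS) - 1);
	printf("VITS_MAX_DEVID   = %lu\n", BIT(VITS_TYPER_DEVBITS) - 1);
	return 0;
}
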
261 list_for_each_entry(collection, &its->collection_list, coll_list) { in find_collection()
262 if (coll_id == collection->collection_id) in find_collection()
275 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this VCPU.
278 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, in update_lpi_config() argument
281 u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); in update_lpi_config()
286 ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET, in update_lpi_config()
292 raw_spin_lock_irqsave(&irq->irq_lock, flags); in update_lpi_config()
294 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { in update_lpi_config()
295 irq->priority = LPI_PROP_PRIORITY(prop); in update_lpi_config()
296 irq->enabled = LPI_PROP_ENABLE_BIT(prop); in update_lpi_config()
298 if (!irq->hw) { in update_lpi_config()
299 vgic_queue_irq_unlock(kvm, irq, flags); in update_lpi_config()
304 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in update_lpi_config()
306 if (irq->hw) in update_lpi_config()
307 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); in update_lpi_config()
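
update_lpi_config() reads one property byte per LPI from the guest table at propbase + intid - GIC_LPI_OFFSET and splits it into a priority and an enable bit. A standalone decode of such a byte, following the GICv3 layout (enable in bit 0, priority in bits [7:2]); the macro bodies below are written from the architecture, not copied from this file:

#include <stdint.h>
#include <stdio.h>

/* GICv3 LPI configuration byte: bit 0 = enable, bits [7:2] = priority. */
#define LPI_PROP_ENABLE_BIT(p)	((p) & 0x01)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

int main(void)
{
	uint8_t prop = 0xa1;	/* example byte as read from guest memory */

	printf("enabled:  %u\n", LPI_PROP_ENABLE_BIT(prop));	/* 1 */
	printf("priority: 0x%x\n", LPI_PROP_PRIORITY(prop));	/* 0xa0 */
	return 0;
}
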
312 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) in update_affinity() argument
317 raw_spin_lock_irqsave(&irq->irq_lock, flags); in update_affinity()
318 irq->target_vcpu = vcpu; in update_affinity()
319 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in update_affinity()
321 if (irq->hw) { in update_affinity()
324 ret = its_get_vlpi(irq->host_irq, &map); in update_affinity()
329 atomic_dec(&map.vpe->vlpi_count); in update_affinity()
330 map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in update_affinity()
331 atomic_inc(&map.vpe->vlpi_count); in update_affinity()
333 ret = its_map_vlpi(irq->host_irq, &map); in update_affinity()
342 return kvm_get_vcpu_by_id(kvm, col->target_addr); in collection_to_vcpu()
355 if (!its_is_collection_mapped(ite->collection)) in update_affinity_ite()
358 vcpu = collection_to_vcpu(kvm, ite->collection); in update_affinity_ite()
359 update_affinity(ite->irq, vcpu); in update_affinity_ite()
369 struct its_device *device; in update_affinity_collection() local
372 for_each_lpi_its(device, ite, its) { in update_affinity_collection()
373 if (ite->collection != coll) in update_affinity_collection()
394 gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); in its_sync_lpi_pending_table()
395 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in its_sync_lpi_pending_table()
397 struct vgic_irq *irq; in its_sync_lpi_pending_table() local
398 int last_byte_offset = -1; in its_sync_lpi_pending_table()
402 xa_for_each(&dist->lpi_xa, intid, irq) { in its_sync_lpi_pending_table()
413 ret = kvm_read_guest_lock(vcpu->kvm, in its_sync_lpi_pending_table()
422 irq = vgic_get_irq(vcpu->kvm, NULL, intid); in its_sync_lpi_pending_table()
423 if (!irq) in its_sync_lpi_pending_table()
426 raw_spin_lock_irqsave(&irq->irq_lock, flags); in its_sync_lpi_pending_table()
427 if (irq->target_vcpu == vcpu) in its_sync_lpi_pending_table()
428 irq->pending_latch = pendmask & (1U << bit_nr); in its_sync_lpi_pending_table()
429 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in its_sync_lpi_pending_table()
430 vgic_put_irq(vcpu->kvm, irq); in its_sync_lpi_pending_table()
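
The pending table stores one bit per INTID, so the sync loop derives a byte offset and a bit number from each LPI's INTID before testing the byte it read from guest memory, as the pendmask & (1U << bit_nr) line shows. A self-contained sketch of that arithmetic (8192 is the architectural base of the LPI INTID range):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint32_t intid = 8193;	/* second LPI; LPI INTIDs start at 8192 */
	uint32_t byte_offset = intid / BITS_PER_BYTE;	/* byte within the table */
	uint32_t bit_nr = intid % BITS_PER_BYTE;	/* bit within that byte */
	uint8_t pendmask = 0x02;	/* hypothetical byte read from guest RAM */

	/* Prints "byte 1024, bit 1, pending=1". */
	printf("byte %u, bit %u, pending=%d\n",
	       byte_offset, bit_nr, !!(pendmask & (1U << bit_nr)));
	return 0;
}
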
449 * DevBits low - at least for the time being. in vgic_mmio_read_its_typer()
453 reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT; in vgic_mmio_read_its_typer()
464 val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK; in vgic_mmio_read_its_iidr()
477 return -EINVAL; in vgic_mmio_uaccess_write_its_iidr()
515 return ERR_PTR(-EINVAL); in __vgic_doorbell_to_its()
517 if (kvm_io_dev->ops != &kvm_io_gic_ops) in __vgic_doorbell_to_its()
518 return ERR_PTR(-EINVAL); in __vgic_doorbell_to_its()
521 if (iodev->iodev_type != IODEV_ITS) in __vgic_doorbell_to_its()
522 return ERR_PTR(-EINVAL); in __vgic_doorbell_to_its()
524 return iodev->its; in __vgic_doorbell_to_its()
538 struct vgic_irq *irq; in vgic_its_check_cache() local
549 irq = xa_load(&its->translation_cache, cache_key); in vgic_its_check_cache()
550 if (!vgic_try_get_irq_kref(irq)) in vgic_its_check_cache()
551 irq = NULL; in vgic_its_check_cache()
555 return irq; in vgic_its_check_cache()
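
The lookup above is keyed by a single scalar, so the device and event IDs must be packed into one xarray index. One plausible packing, shifting the device ID above an assumed 16-bit event-ID field (the exact layout of cache_key is not shown in this listing, so treat this as an assumption):

#include <stdint.h>
#include <stdio.h>

#define EVENTID_BITS 16	/* assumed width of the event-ID field */

/* Illustrative packing of (devid, eventid) into one xarray index. */
static unsigned long cache_key(uint32_t devid, uint32_t eventid)
{
	return ((unsigned long)devid << EVENTID_BITS) | eventid;
}

int main(void)
{
	/* Prints "key(devid=2, eventid=5) = 0x20005". */
	printf("key(devid=2, eventid=5) = 0x%lx\n", cache_key(2, 5));
	return 0;
}
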
560 struct vgic_irq *irq) in vgic_its_cache_translation() argument
566 if (irq->hw) in vgic_its_cache_translation()
570 * The irq refcount is guaranteed to be nonzero while holding the in vgic_its_cache_translation()
573 lockdep_assert_held(&its->its_lock); in vgic_its_cache_translation()
574 vgic_get_irq_kref(irq); in vgic_its_cache_translation()
581 old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT); in vgic_its_cache_translation()
588 struct kvm *kvm = its->dev->kvm; in vgic_its_invalidate_cache()
589 struct vgic_irq *irq; in vgic_its_invalidate_cache() local
592 xa_for_each(&its->translation_cache, idx, irq) { in vgic_its_invalidate_cache()
593 xa_erase(&its->translation_cache, idx); in vgic_its_invalidate_cache()
594 vgic_put_irq(kvm, irq); in vgic_its_invalidate_cache()
605 list_for_each_entry_rcu(dev, &kvm->devices, vm_node) { in vgic_its_invalidate_all_caches()
606 if (dev->ops != &kvm_arm_vgic_its_ops) in vgic_its_invalidate_all_caches()
609 its = dev->private; in vgic_its_invalidate_all_caches()
617 u32 devid, u32 eventid, struct vgic_irq **irq) in vgic_its_resolve_lpi() argument
622 if (!its->enabled) in vgic_its_resolve_lpi()
623 return -EBUSY; in vgic_its_resolve_lpi()
626 if (!ite || !its_is_collection_mapped(ite->collection)) in vgic_its_resolve_lpi()
629 vcpu = collection_to_vcpu(kvm, ite->collection); in vgic_its_resolve_lpi()
634 return -EBUSY; in vgic_its_resolve_lpi()
636 vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq); in vgic_its_resolve_lpi()
638 *irq = ite->irq; in vgic_its_resolve_lpi()
647 return ERR_PTR(-ENODEV); in vgic_msi_to_its()
649 if (!(msi->flags & KVM_MSI_VALID_DEVID)) in vgic_msi_to_its()
650 return ERR_PTR(-EINVAL); in vgic_msi_to_its()
652 address = (u64)msi->address_hi << 32 | msi->address_lo; in vgic_msi_to_its()
659 * and make this IRQ pending, possibly injecting it.
667 struct vgic_irq *irq = NULL; in vgic_its_trigger_msi() local
671 err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq); in vgic_its_trigger_msi()
675 if (irq->hw) in vgic_its_trigger_msi()
676 return irq_set_irqchip_state(irq->host_irq, in vgic_its_trigger_msi()
679 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_its_trigger_msi()
680 irq->pending_latch = true; in vgic_its_trigger_msi()
681 vgic_queue_irq_unlock(kvm, irq, flags); in vgic_its_trigger_msi()
688 struct vgic_irq *irq; in vgic_its_inject_cached_translation() local
692 db = (u64)msi->address_hi << 32 | msi->address_lo; in vgic_its_inject_cached_translation()
693 irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data); in vgic_its_inject_cached_translation()
694 if (!irq) in vgic_its_inject_cached_translation()
695 return -EWOULDBLOCK; in vgic_its_inject_cached_translation()
697 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_its_inject_cached_translation()
698 irq->pending_latch = true; in vgic_its_inject_cached_translation()
699 vgic_queue_irq_unlock(kvm, irq, flags); in vgic_its_inject_cached_translation()
700 vgic_put_irq(kvm, irq); in vgic_its_inject_cached_translation()
723 mutex_lock(&its->its_lock); in vgic_its_inject_msi()
724 ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data); in vgic_its_inject_msi()
725 mutex_unlock(&its->its_lock); in vgic_its_inject_msi()
744 list_del(&ite->ite_list); in its_free_ite()
747 if (ite->irq) { in its_free_ite()
748 if (ite->irq->hw) in its_free_ite()
749 WARN_ON(its_unmap_vlpi(ite->irq->host_irq)); in its_free_ite()
751 vgic_put_irq(kvm, ite->irq); in its_free_ite()
759 return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1); in its_cmd_mask_field()
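
An ITS command is four little-endian 64-bit doublewords, and its_cmd_mask_field() extracts an arbitrary bit-field from one of them. A standalone replica of the helper, decoding the opcode and device ID of a MAPTI-style command (field positions follow the GICv3 ITS command encoding; the le64_to_cpu conversion is omitted for brevity, so this assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Same shape as the kernel helper: doubleword 'word', bits [shift+size-1:shift]. */
static uint64_t its_cmd_mask_field(const uint64_t *its_cmd, int word,
				   int shift, int size)
{
	return (its_cmd[word] >> shift) & (BIT_ULL(size) - 1);
}

int main(void)
{
	/* MAPTI: opcode 0x0a in DW0[7:0], device ID in DW0[63:32]. */
	uint64_t cmd[4] = { (0x1234ULL << 32) | 0x0a, 0, 0, 0 };

	printf("opcode:    0x%llx\n",
	       (unsigned long long)its_cmd_mask_field(cmd, 0, 0, 8));
	printf("device ID: 0x%llx\n",
	       (unsigned long long)its_cmd_mask_field(cmd, 0, 32, 32));
	return 0;
}
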
784 if (ite && its_is_collection_mapped(ite->collection)) { in vgic_its_cmd_handle_discard()
817 if (!its_is_collection_mapped(ite->collection)) in vgic_its_cmd_handle_movi()
824 ite->collection = collection; in vgic_its_cmd_handle_movi()
829 return update_affinity(ite->irq, vcpu); in vgic_its_cmd_handle_movi()
838 idx = srcu_read_lock(&its->dev->kvm->srcu); in __is_visible_gfn_locked()
839 ret = kvm_is_visible_gfn(its->dev->kvm, gfn); in __is_visible_gfn_locked()
840 srcu_read_unlock(&its->dev->kvm->srcu, idx); in __is_visible_gfn_locked()
866 /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */ in vgic_its_check_id()
893 /* Each 1st level entry is represented by a 64-bit value. */ in vgic_its_check_id()
894 if (kvm_read_guest_lock(its->dev->kvm, in vgic_its_check_id()
920 * Translation Table, which starts at device->itt_addr.
922 static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device, in vgic_its_check_event_id() argument
926 int ite_esz = abi->ite_esz; in vgic_its_check_event_id()
929 /* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */ in vgic_its_check_event_id()
930 if (event_id >= BIT_ULL(device->num_eventid_bits)) in vgic_its_check_event_id()
933 gpa = device->itt_addr + event_id * ite_esz; in vgic_its_check_event_id()
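
vgic_its_check_event_id() first bounds the event ID by the device's advertised ID width, then computes where the corresponding ITE would sit inside the ITT. A worked instance of both steps (the 8-byte entry size mirrors what a v0-style ABI would use, but is an assumption here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t itt_addr = 0x80000000;	/* hypothetical guest PA of the ITT */
	unsigned int num_eventid_bits = 5;	/* from MAPD's size field */
	unsigned int ite_esz = 8;	/* assumed ITE size, in bytes */
	uint32_t event_id = 17;

	if (event_id >= (1ULL << num_eventid_bits))
		return 1;	/* out of range: this ITT holds only 32 events */

	/* 17 * 8 = 136, so this prints "ITE at 0x80000088". */
	printf("ITE at 0x%llx\n",
	       (unsigned long long)(itt_addr + (uint64_t)event_id * ite_esz));
	return 0;
}
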
949 return -ENOMEM; in vgic_its_alloc_collection()
951 collection->collection_id = coll_id; in vgic_its_alloc_collection()
952 collection->target_addr = COLLECTION_NOT_MAPPED; in vgic_its_alloc_collection()
954 list_add_tail(&collection->coll_list, &its->collection_list); in vgic_its_alloc_collection()
963 struct its_device *device; in vgic_its_free_collection() local
975 for_each_lpi_its(device, ite, its) in vgic_its_free_collection()
976 if (ite->collection && in vgic_its_free_collection()
977 ite->collection->collection_id == coll_id) in vgic_its_free_collection()
978 ite->collection = NULL; in vgic_its_free_collection()
980 list_del(&collection->coll_list); in vgic_its_free_collection()
985 static struct its_ite *vgic_its_alloc_ite(struct its_device *device, in vgic_its_alloc_ite() argument
993 return ERR_PTR(-ENOMEM); in vgic_its_alloc_ite()
995 ite->event_id = event_id; in vgic_its_alloc_ite()
996 ite->collection = collection; in vgic_its_alloc_ite()
998 list_add_tail(&ite->ite_list, &device->itt_head); in vgic_its_alloc_ite()
1014 struct its_device *device; in vgic_its_cmd_handle_mapi() local
1016 struct vgic_irq *irq; in vgic_its_cmd_handle_mapi() local
1019 device = find_its_device(its, device_id); in vgic_its_cmd_handle_mapi()
1020 if (!device) in vgic_its_cmd_handle_mapi()
1023 if (!vgic_its_check_event_id(its, device, event_id)) in vgic_its_cmd_handle_mapi()
1031 lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser)) in vgic_its_cmd_handle_mapi()
1042 if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL)) in vgic_its_cmd_handle_mapi()
1051 ite = vgic_its_alloc_ite(device, collection, event_id); in vgic_its_cmd_handle_mapi()
1061 irq = vgic_add_lpi(kvm, lpi_nr, vcpu); in vgic_its_cmd_handle_mapi()
1062 if (IS_ERR(irq)) { in vgic_its_cmd_handle_mapi()
1066 return PTR_ERR(irq); in vgic_its_cmd_handle_mapi()
1068 ite->irq = irq; in vgic_its_cmd_handle_mapi()
1075 struct its_device *device) in vgic_its_free_device() argument
1080 * The spec says that unmapping a device with still valid in vgic_its_free_device()
1084 list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list) in vgic_its_free_device()
1089 list_del(&device->dev_list); in vgic_its_free_device()
1090 kfree(device); in vgic_its_free_device()
1098 list_for_each_entry_safe(cur, temp, &its->device_list, dev_list) in vgic_its_free_device_list()
1107 list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list) in vgic_its_free_collection_list()
1108 vgic_its_free_collection(its, cur->collection_id); in vgic_its_free_collection_list()
1116 struct its_device *device; in vgic_its_alloc_device() local
1118 device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT); in vgic_its_alloc_device()
1119 if (!device) in vgic_its_alloc_device()
1120 return ERR_PTR(-ENOMEM); in vgic_its_alloc_device()
1122 device->device_id = device_id; in vgic_its_alloc_device()
1123 device->itt_addr = itt_addr; in vgic_its_alloc_device()
1124 device->num_eventid_bits = num_eventid_bits; in vgic_its_alloc_device()
1125 INIT_LIST_HEAD(&device->itt_head); in vgic_its_alloc_device()
1127 list_add_tail(&device->dev_list, &its->device_list); in vgic_its_alloc_device()
1128 return device; in vgic_its_alloc_device()
1132 * MAPD maps a device ID to an Interrupt Translation Table (ITT), or unmaps it.
1142 struct its_device *device; in vgic_its_cmd_handle_mapd() local
1144 if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL)) in vgic_its_cmd_handle_mapd()
1150 device = find_its_device(its, device_id); in vgic_its_cmd_handle_mapd()
1153 * The spec says that calling MAPD on an already mapped device in vgic_its_cmd_handle_mapd()
1154 * invalidates all cached data for this device. We implement this in vgic_its_cmd_handle_mapd()
1155 * by removing the mapping and re-establishing it. in vgic_its_cmd_handle_mapd()
1157 if (device) in vgic_its_cmd_handle_mapd()
1158 vgic_its_free_device(kvm, its, device); in vgic_its_cmd_handle_mapd()
1161 * The spec does not say whether unmapping a not-mapped device in vgic_its_cmd_handle_mapd()
1167 device = vgic_its_alloc_device(its, device_id, itt_addr, in vgic_its_cmd_handle_mapd()
1170 return PTR_ERR_OR_ZERO(device); in vgic_its_cmd_handle_mapd()
1202 if (!vgic_its_check_id(its, its->baser_coll_table, in vgic_its_cmd_handle_mapc()
1210 collection->target_addr = vcpu->vcpu_id; in vgic_its_cmd_handle_mapc()
1212 collection->target_addr = vcpu->vcpu_id; in vgic_its_cmd_handle_mapc()
1236 ite->irq->pending_latch = false; in vgic_its_cmd_handle_clear()
1238 if (ite->irq->hw) in vgic_its_cmd_handle_clear()
1239 return irq_set_irqchip_state(ite->irq->host_irq, in vgic_its_cmd_handle_clear()
1245 int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq) in vgic_its_inv_lpi() argument
1247 return update_lpi_config(kvm, irq, NULL, true); in vgic_its_inv_lpi()
1266 return vgic_its_inv_lpi(kvm, ite->irq); in vgic_its_cmd_handle_inv()
1270 * vgic_its_invall - invalidate all LPIs targeting a given vcpu
1279 struct kvm *kvm = vcpu->kvm; in vgic_its_invall()
1280 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_its_invall()
1281 struct vgic_irq *irq; in vgic_its_invall() local
1284 xa_for_each(&dist->lpi_xa, intid, irq) { in vgic_its_invall()
1285 irq = vgic_get_irq(kvm, NULL, intid); in vgic_its_invall()
1286 if (!irq) in vgic_its_invall()
1289 update_lpi_config(kvm, irq, vcpu, false); in vgic_its_invall()
1290 vgic_put_irq(kvm, irq); in vgic_its_invall()
1293 if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm) in vgic_its_invall()
1294 its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe); in vgic_its_invall()
1300 * The INVALL command requests flushing of all IRQ data in this collection.
1302 * of mapped LPIs and update the configuration for each IRQ which targets
1303 * the specified vcpu. The configuration will be read from the in-memory configuration table.
1328 * However the spec says that no IRQ must target the old redistributor afterwards.
1335 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_its_cmd_handle_movall()
1337 struct vgic_irq *irq; in vgic_its_cmd_handle_movall() local
1350 xa_for_each(&dist->lpi_xa, intid, irq) { in vgic_its_cmd_handle_movall()
1351 irq = vgic_get_irq(kvm, NULL, intid); in vgic_its_cmd_handle_movall()
1352 if (!irq) in vgic_its_cmd_handle_movall()
1355 update_affinity(irq, vcpu2); in vgic_its_cmd_handle_movall()
1357 vgic_put_irq(kvm, irq); in vgic_its_cmd_handle_movall()
1385 int ret = -ENODEV; in vgic_its_handle_command()
1387 mutex_lock(&its->its_lock); in vgic_its_handle_command()
1427 mutex_unlock(&its->its_lock); in vgic_its_handle_command()
1472 return extract_bytes(its->cbaser, addr & 7, len); in vgic_mmio_read_its_cbaser()
1480 if (its->enabled) in vgic_mmio_write_its_cbaser()
1483 mutex_lock(&its->cmd_lock); in vgic_mmio_write_its_cbaser()
1484 its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val); in vgic_mmio_write_its_cbaser()
1485 its->cbaser = vgic_sanitise_its_cbaser(its->cbaser); in vgic_mmio_write_its_cbaser()
1486 its->creadr = 0; in vgic_mmio_write_its_cbaser()
1491 its->cwriter = its->creadr; in vgic_mmio_write_its_cbaser()
1492 mutex_unlock(&its->cmd_lock); in vgic_mmio_write_its_cbaser()
1506 if (!its->enabled) in vgic_its_process_commands()
1509 cbaser = GITS_CBASER_ADDRESS(its->cbaser); in vgic_its_process_commands()
1511 while (its->cwriter != its->creadr) { in vgic_its_process_commands()
1512 int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr, in vgic_its_process_commands()
1524 its->creadr += ITS_CMD_SIZE; in vgic_its_process_commands()
1525 if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser)) in vgic_its_process_commands()
1526 its->creadr = 0; in vgic_its_process_commands()
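
The processing loop above is a plain ring buffer: CREADR chases CWRITER in 32-byte command steps and wraps at the buffer size encoded in CBASER. A minimal model of that advance (the 4 KiB buffer size is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

#define ITS_CMD_SIZE 32	/* one ITS command occupies 32 bytes */

int main(void)
{
	uint64_t buf_size = 4096;	/* hypothetical ITS_CMD_BUFFER_SIZE(cbaser) */
	uint64_t creadr = 4064, cwriter = 64;

	/* Consume commands until the read pointer catches the write pointer. */
	while (creadr != cwriter) {
		printf("process command at offset %llu\n",
		       (unsigned long long)creadr);
		creadr += ITS_CMD_SIZE;
		if (creadr == buf_size)
			creadr = 0;	/* wrap to the start of the buffer */
	}
	return 0;
}
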
1545 mutex_lock(&its->cmd_lock); in vgic_mmio_write_its_cwriter()
1547 reg = update_64bit_reg(its->cwriter, addr & 7, len, val); in vgic_mmio_write_its_cwriter()
1549 if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { in vgic_mmio_write_its_cwriter()
1550 mutex_unlock(&its->cmd_lock); in vgic_mmio_write_its_cwriter()
1553 its->cwriter = reg; in vgic_mmio_write_its_cwriter()
1557 mutex_unlock(&its->cmd_lock); in vgic_mmio_write_its_cwriter()
1564 return extract_bytes(its->cwriter, addr & 0x7, len); in vgic_mmio_read_its_cwriter()
1571 return extract_bytes(its->creadr, addr & 0x7, len); in vgic_mmio_read_its_creadr()
1582 mutex_lock(&its->cmd_lock); in vgic_mmio_uaccess_write_its_creadr()
1584 if (its->enabled) { in vgic_mmio_uaccess_write_its_creadr()
1585 ret = -EBUSY; in vgic_mmio_uaccess_write_its_creadr()
1590 if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { in vgic_mmio_uaccess_write_its_creadr()
1591 ret = -EINVAL; in vgic_mmio_uaccess_write_its_creadr()
1595 its->creadr = cmd_offset; in vgic_mmio_uaccess_write_its_creadr()
1597 mutex_unlock(&its->cmd_lock); in vgic_mmio_uaccess_write_its_creadr()
1610 reg = its->baser_device_table; in vgic_mmio_read_its_baser()
1613 reg = its->baser_coll_table; in vgic_mmio_read_its_baser()
1634 if (its->enabled) in vgic_mmio_write_its_baser()
1639 regptr = &its->baser_device_table; in vgic_mmio_write_its_baser()
1640 entry_size = abi->dte_esz; in vgic_mmio_write_its_baser()
1644 regptr = &its->baser_coll_table; in vgic_mmio_write_its_baser()
1645 entry_size = abi->cte_esz; in vgic_mmio_write_its_baser()
1657 reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT; in vgic_mmio_write_its_baser()
1665 mutex_lock(&its->its_lock); in vgic_mmio_write_its_baser()
1674 mutex_unlock(&its->its_lock); in vgic_mmio_write_its_baser()
1684 mutex_lock(&its->cmd_lock); in vgic_mmio_read_its_ctlr()
1685 if (its->creadr == its->cwriter) in vgic_mmio_read_its_ctlr()
1687 if (its->enabled) in vgic_mmio_read_its_ctlr()
1689 mutex_unlock(&its->cmd_lock); in vgic_mmio_read_its_ctlr()
1698 mutex_lock(&its->cmd_lock); in vgic_mmio_write_its_ctlr()
1702 * device/collection BASER are invalid in vgic_mmio_write_its_ctlr()
1704 if (!its->enabled && (val & GITS_CTLR_ENABLE) && in vgic_mmio_write_its_ctlr()
1705 (!(its->baser_device_table & GITS_BASER_VALID) || in vgic_mmio_write_its_ctlr()
1706 !(its->baser_coll_table & GITS_BASER_VALID) || in vgic_mmio_write_its_ctlr()
1707 !(its->cbaser & GITS_CBASER_VALID))) in vgic_mmio_write_its_ctlr()
1710 its->enabled = !!(val & GITS_CTLR_ENABLE); in vgic_mmio_write_its_ctlr()
1711 if (!its->enabled) in vgic_mmio_write_its_ctlr()
1721 mutex_unlock(&its->cmd_lock); in vgic_mmio_write_its_ctlr()
1781 if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ)) in vgic_enable_lpis()
1788 struct vgic_io_device *iodev = &its->iodev; in vgic_register_its_iodev()
1791 mutex_lock(&kvm->slots_lock); in vgic_register_its_iodev()
1792 if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) { in vgic_register_its_iodev()
1793 ret = -EBUSY; in vgic_register_its_iodev()
1797 its->vgic_its_base = addr; in vgic_register_its_iodev()
1798 iodev->regions = its_registers; in vgic_register_its_iodev()
1799 iodev->nr_regions = ARRAY_SIZE(its_registers); in vgic_register_its_iodev()
1800 kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops); in vgic_register_its_iodev()
1802 iodev->base_addr = its->vgic_its_base; in vgic_register_its_iodev()
1803 iodev->iodev_type = IODEV_ITS; in vgic_register_its_iodev()
1804 iodev->its = its; in vgic_register_its_iodev()
1805 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr, in vgic_register_its_iodev()
1806 KVM_VGIC_V3_ITS_SIZE, &iodev->dev); in vgic_register_its_iodev()
1808 mutex_unlock(&kvm->slots_lock); in vgic_register_its_iodev()
1830 return -ENODEV; in vgic_its_create()
1834 return -ENOMEM; in vgic_its_create()
1836 mutex_lock(&dev->kvm->arch.config_lock); in vgic_its_create()
1838 if (vgic_initialized(dev->kvm)) { in vgic_its_create()
1839 ret = vgic_v4_init(dev->kvm); in vgic_its_create()
1841 mutex_unlock(&dev->kvm->arch.config_lock); in vgic_its_create()
1847 mutex_init(&its->its_lock); in vgic_its_create()
1848 mutex_init(&its->cmd_lock); in vgic_its_create()
1852 mutex_lock(&its->cmd_lock); in vgic_its_create()
1853 mutex_lock(&its->its_lock); in vgic_its_create()
1854 mutex_unlock(&its->its_lock); in vgic_its_create()
1855 mutex_unlock(&its->cmd_lock); in vgic_its_create()
1858 its->vgic_its_base = VGIC_ADDR_UNDEF; in vgic_its_create()
1860 INIT_LIST_HEAD(&its->device_list); in vgic_its_create()
1861 INIT_LIST_HEAD(&its->collection_list); in vgic_its_create()
1862 xa_init(&its->translation_cache); in vgic_its_create()
1864 dev->kvm->arch.vgic.msis_require_devid = true; in vgic_its_create()
1865 dev->kvm->arch.vgic.has_its = true; in vgic_its_create()
1866 its->enabled = false; in vgic_its_create()
1867 its->dev = dev; in vgic_its_create()
1869 its->baser_device_table = INITIAL_BASER_VALUE | in vgic_its_create()
1871 its->baser_coll_table = INITIAL_BASER_VALUE | in vgic_its_create()
1873 dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE; in vgic_its_create()
1875 dev->private = its; in vgic_its_create()
1877 ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1); in vgic_its_create()
1879 mutex_unlock(&dev->kvm->arch.config_lock); in vgic_its_create()
1886 struct kvm *kvm = kvm_dev->kvm; in vgic_its_destroy()
1887 struct vgic_its *its = kvm_dev->private; in vgic_its_destroy()
1889 mutex_lock(&its->its_lock); in vgic_its_destroy()
1894 xa_destroy(&its->translation_cache); in vgic_its_destroy()
1896 mutex_unlock(&its->its_lock); in vgic_its_destroy()
1905 gpa_t offset = attr->attr; in vgic_its_has_attr_regs()
1911 return -EINVAL; in vgic_its_has_attr_regs()
1917 return -ENXIO; in vgic_its_has_attr_regs()
1932 its = dev->private; in vgic_its_attr_regs_access()
1933 offset = attr->attr; in vgic_its_attr_regs_access()
1936 * Although the spec supports upper/lower 32-bit accesses to in vgic_its_attr_regs_access()
1937 * 64-bit ITS registers, the userspace ABI requires 64-bit in vgic_its_attr_regs_access()
1938 * accesses to all 64-bit wide registers. We therefore only in vgic_its_attr_regs_access()
1939 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID in vgic_its_attr_regs_access()
1948 return -EINVAL; in vgic_its_attr_regs_access()
1950 mutex_lock(&dev->kvm->lock); in vgic_its_attr_regs_access()
1952 if (!lock_all_vcpus(dev->kvm)) { in vgic_its_attr_regs_access()
1953 mutex_unlock(&dev->kvm->lock); in vgic_its_attr_regs_access()
1954 return -EBUSY; in vgic_its_attr_regs_access()
1957 mutex_lock(&dev->kvm->arch.config_lock); in vgic_its_attr_regs_access()
1959 if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) { in vgic_its_attr_regs_access()
1960 ret = -ENXIO; in vgic_its_attr_regs_access()
1968 ret = -ENXIO; in vgic_its_attr_regs_access()
1972 addr = its->vgic_its_base + offset; in vgic_its_attr_regs_access()
1974 len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4; in vgic_its_attr_regs_access()
1977 if (region->uaccess_its_write) in vgic_its_attr_regs_access()
1978 ret = region->uaccess_its_write(dev->kvm, its, addr, in vgic_its_attr_regs_access()
1981 region->its_write(dev->kvm, its, addr, len, *reg); in vgic_its_attr_regs_access()
1983 *reg = region->its_read(dev->kvm, its, addr, len); in vgic_its_attr_regs_access()
1986 mutex_unlock(&dev->kvm->arch.config_lock); in vgic_its_attr_regs_access()
1987 unlock_all_vcpus(dev->kvm); in vgic_its_attr_regs_access()
1988 mutex_unlock(&dev->kvm->lock); in vgic_its_attr_regs_access()
1998 if (list_is_last(&dev->dev_list, h)) in compute_next_devid_offset()
2001 next_offset = next->device_id - dev->device_id; in compute_next_devid_offset()
2011 if (list_is_last(&ite->ite_list, h)) in compute_next_eventid_offset()
2014 next_offset = next->event_id - ite->event_id; in compute_next_eventid_offset()
2020 * typedef entry_fn_t - Callback called on a table entry restore path
2033 * scan_its_table - Scans a contiguous table in guest RAM and applies a function to each entry
2051 struct kvm *kvm = its->dev->kvm; in scan_its_table()
2078 len -= byte_offset; in scan_its_table()
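
scan_its_table() walks a table in guest RAM entry by entry and hands each entry to a callback. The callback shape its callers imply is roughly the following sketch (the return convention in the comment is an assumption based on how the scan terminates):

#include <stdint.h>

struct vgic_its;	/* opaque here; defined in the vgic headers */

/*
 * Sketch of the restore callback: @id is the entry's index within the
 * table, @entry points at the bytes read from guest RAM, and @opaque
 * carries caller state. A negative return aborts the scan with an error;
 * other values steer whether and where the scan continues.
 */
typedef int (*entry_fn_t)(struct vgic_its *its, uint32_t id, void *entry,
			  void *opaque);
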
2084 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
2089 struct kvm *kvm = its->dev->kvm; in vgic_its_save_ite()
2093 next_offset = compute_next_eventid_offset(&dev->itt_head, ite); in vgic_its_save_ite()
2095 ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | in vgic_its_save_ite()
2096 ite->collection->collection_id; in vgic_its_save_ite()
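
The saved ITE packs three fields into a single 64-bit word: the offset to the next event ID in the top bits, the physical INTID in the middle, and the collection ID at the bottom. A standalone decode, assuming the next/pINTID/ICID split at bits 48 and 16 that the shift names above suggest:

#include <stdint.h>
#include <stdio.h>

#define ITE_NEXT_SHIFT   48	/* assumed KVM_ITS_ITE_NEXT_SHIFT */
#define ITE_PINTID_SHIFT 16	/* assumed KVM_ITS_ITE_PINTID_SHIFT */

int main(void)
{
	/* next = 1, physical INTID = 8200, collection ID = 3 */
	uint64_t val = (1ULL << ITE_NEXT_SHIFT) |
		       (8200ULL << ITE_PINTID_SHIFT) | 3;

	printf("next:   %llu\n", (unsigned long long)(val >> ITE_NEXT_SHIFT));
	printf("pINTID: %llu\n",
	       (unsigned long long)((val >> ITE_PINTID_SHIFT) & 0xffffffffULL));
	printf("ICID:   %llu\n", (unsigned long long)(val & 0xffff));
	return 0;
}
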
2102 * vgic_its_restore_ite - restore an interrupt translation entry
2114 struct kvm *kvm = its->dev->kvm; in vgic_its_restore_ite()
2118 struct vgic_irq *irq; in vgic_its_restore_ite() local
2134 return -EINVAL; in vgic_its_restore_ite()
2137 if (event_id + offset >= BIT_ULL(dev->num_eventid_bits)) in vgic_its_restore_ite()
2138 return -EINVAL; in vgic_its_restore_ite()
2142 return -EINVAL; in vgic_its_restore_ite()
2145 return -EINVAL; in vgic_its_restore_ite()
2152 vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr); in vgic_its_restore_ite()
2154 irq = vgic_add_lpi(kvm, lpi_id, vcpu); in vgic_its_restore_ite()
2155 if (IS_ERR(irq)) { in vgic_its_restore_ite()
2157 return PTR_ERR(irq); in vgic_its_restore_ite()
2159 ite->irq = irq; in vgic_its_restore_ite()
2170 if (itea->event_id < iteb->event_id) in vgic_its_ite_cmp()
2171 return -1; in vgic_its_ite_cmp()
2176 static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device) in vgic_its_save_itt() argument
2179 gpa_t base = device->itt_addr; in vgic_its_save_itt()
2182 int ite_esz = abi->ite_esz; in vgic_its_save_itt()
2184 list_sort(NULL, &device->itt_head, vgic_its_ite_cmp); in vgic_its_save_itt()
2186 list_for_each_entry(ite, &device->itt_head, ite_list) { in vgic_its_save_itt()
2187 gpa_t gpa = base + ite->event_id * ite_esz; in vgic_its_save_itt()
2195 if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1) in vgic_its_save_itt()
2196 return -EACCES; in vgic_its_save_itt()
2198 ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz); in vgic_its_save_itt()
2206 * vgic_its_restore_itt - restore the ITT of a device
2209 * @dev: device handle
2216 gpa_t base = dev->itt_addr; in vgic_its_restore_itt()
2218 int ite_esz = abi->ite_esz; in vgic_its_restore_itt()
2219 size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz; in vgic_its_restore_itt()
2232 * vgic_its_save_dte - Save a device table entry at a given GPA
2235 * @dev: ITS device
2237 * @dte_esz: device table entry size
2242 struct kvm *kvm = its->dev->kvm; in vgic_its_save_dte()
2246 itt_addr_field = dev->itt_addr >> 8; in vgic_its_save_dte()
2247 next_offset = compute_next_devid_offset(&its->device_list, dev); in vgic_its_save_dte()
2251 (dev->num_eventid_bits - 1)); in vgic_its_save_dte()
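
The DTE uses the same packing trick: a valid bit on top, the device-ID offset to the next entry, the ITT base (stored 256-byte aligned, hence the >> 8 above), and the event-ID width minus one in the low five bits. A standalone encode and decode under that layout (the shift values are assumptions consistent with the code above):

#include <stdint.h>
#include <stdio.h>

#define DTE_VALID_SHIFT   63	/* assumed KVM_ITS_DTE_VALID_SHIFT */
#define DTE_NEXT_SHIFT    49	/* assumed KVM_ITS_DTE_NEXT_SHIFT */
#define DTE_ITTADDR_SHIFT 5	/* assumed KVM_ITS_DTE_ITTADDR_SHIFT */

int main(void)
{
	uint64_t itt_addr = 0x80000000;	/* 256-byte aligned guest PA */
	uint64_t val = (1ULL << DTE_VALID_SHIFT) |
		       (1ULL << DTE_NEXT_SHIFT) |
		       ((itt_addr >> 8) << DTE_ITTADDR_SHIFT) |
		       (5 - 1);	/* num_eventid_bits - 1 */

	printf("valid:    %llu\n", (unsigned long long)(val >> DTE_VALID_SHIFT));
	printf("itt_addr: 0x%llx\n", (unsigned long long)
	       (((val >> DTE_ITTADDR_SHIFT) & ((1ULL << 44) - 1)) << 8));
	printf("size:     %llu\n", (unsigned long long)(val & 0x1f));
	return 0;
}
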
2257 * vgic_its_restore_dte - restore a device table entry
2260 * @id: device id the DTE corresponds to
2271 u64 baser = its->baser_device_table; in vgic_its_restore_dte()
2293 return -EINVAL; in vgic_its_restore_dte()
2301 vgic_its_free_device(its->dev->kvm, its, dev); in vgic_its_restore_dte()
2314 if (deva->device_id < devb->device_id) in vgic_its_device_cmp()
2315 return -1; in vgic_its_device_cmp()
2321 * vgic_its_save_device_tables - Save the device table and all ITTs into guest RAM
2325 * returns the GPA of the device entry
2330 u64 baser = its->baser_device_table; in vgic_its_save_device_tables()
2332 int dte_esz = abi->dte_esz; in vgic_its_save_device_tables()
2337 list_sort(NULL, &its->device_list, vgic_its_device_cmp); in vgic_its_save_device_tables()
2339 list_for_each_entry(dev, &its->device_list, dev_list) { in vgic_its_save_device_tables()
2344 dev->device_id, &eaddr)) in vgic_its_save_device_tables()
2345 return -EINVAL; in vgic_its_save_device_tables()
2359 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2374 int l2_start_id = id * (SZ_64K / abi->dte_esz); in handle_l1_dte()
2376 int dte_esz = abi->dte_esz; in handle_l1_dte()
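
In the two-level layout each L1 entry points at one 64 KiB page of L2 entries, so the L2 page reached through L1 entry id covers device IDs starting at id * (65536 / dte_esz), exactly as computed above. A worked instance (the 8-byte entry size is again an assumption):

#include <stdio.h>

#define SZ_64K 65536

int main(void)
{
	int dte_esz = 8;	/* assumed device table entry size */
	int id = 3;	/* index of the L1 entry being handled */

	/* Each L1 entry spans 8192 device IDs here, so this prints 24576. */
	printf("l2_start_id = %d\n", id * (SZ_64K / dte_esz));
	return 0;
}
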
2394 * vgic_its_restore_device_tables - Restore the device table and all ITTs from guest RAM
2400 u64 baser = its->baser_device_table; in vgic_its_restore_device_tables()
2415 l1_esz = abi->dte_esz; in vgic_its_restore_device_tables()
2425 vgic_its_free_device_list(its->dev->kvm, its); in vgic_its_restore_device_tables()
2437 ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | in vgic_its_save_cte()
2438 collection->collection_id); in vgic_its_save_cte()
2440 return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz); in vgic_its_save_cte()
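
The CTE is the smallest of the three saved formats: a valid bit on top and the target redistributor (recorded here as a vcpu_id) shifted above a 16-bit collection ID. A standalone decode, assuming the valid-bit position and reusing the RDBASE shift of 16 that the code above implies:

#include <stdint.h>
#include <stdio.h>

#define CTE_VALID_SHIFT  63	/* assumed KVM_ITS_CTE_VALID_SHIFT */
#define CTE_RDBASE_SHIFT 16	/* matches KVM_ITS_CTE_RDBASE_SHIFT above */

int main(void)
{
	/* valid entry: target vcpu_id 2, collection ID 7 */
	uint64_t val = (1ULL << CTE_VALID_SHIFT) |
		       (2ULL << CTE_RDBASE_SHIFT) | 7;

	printf("valid:  %llu\n", (unsigned long long)(val >> CTE_VALID_SHIFT));
	printf("rdbase: %llu\n",
	       (unsigned long long)((val >> CTE_RDBASE_SHIFT) & 0xffffffffULL));
	printf("icid:   %llu\n", (unsigned long long)(val & 0xffff));
	return 0;
}
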
2446 * interpreted as end-of-table), and a negative error value for generic errors.
2451 struct kvm *kvm = its->dev->kvm; in vgic_its_restore_cte()
2469 return -EINVAL; in vgic_its_restore_cte()
2473 return -EEXIST; in vgic_its_restore_cte()
2475 if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL)) in vgic_its_restore_cte()
2476 return -EINVAL; in vgic_its_restore_cte()
2481 collection->target_addr = target_addr; in vgic_its_restore_cte()
2486 * vgic_its_save_collection_table - Save the collection table into guest RAM
2492 u64 baser = its->baser_coll_table; in vgic_its_save_collection_table()
2497 int ret, cte_esz = abi->cte_esz; in vgic_its_save_collection_table()
2504 list_for_each_entry(collection, &its->collection_list, coll_list) { in vgic_its_save_collection_table()
2521 ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); in vgic_its_save_collection_table()
2526 * vgic_its_restore_collection_table - Reads the collection table from guest memory
2533 u64 baser = its->baser_coll_table; in vgic_its_restore_collection_table()
2534 int cte_esz = abi->cte_esz; in vgic_its_restore_collection_table()
2558 vgic_its_free_collection_list(its->dev->kvm, its); in vgic_its_restore_collection_table()
2564 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2579 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2593 vgic_its_free_collection_list(its->dev->kvm, its); in vgic_its_restore_tables_v0()
2602 its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK; in vgic_its_commit_v0()
2603 its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK; in vgic_its_commit_v0()
2605 its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5) in vgic_its_commit_v0()
2608 its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5) in vgic_its_commit_v0()
2616 its->baser_coll_table &= ~GITS_BASER_VALID; in vgic_its_reset()
2617 its->baser_device_table &= ~GITS_BASER_VALID; in vgic_its_reset()
2618 its->cbaser = 0; in vgic_its_reset()
2619 its->creadr = 0; in vgic_its_reset()
2620 its->cwriter = 0; in vgic_its_reset()
2621 its->enabled = 0; in vgic_its_reset()
2629 switch (attr->group) { in vgic_its_has_attr()
2631 switch (attr->attr) { in vgic_its_has_attr()
2637 switch (attr->attr) { in vgic_its_has_attr()
2651 return -ENXIO; in vgic_its_has_attr()
2662 mutex_lock(&kvm->lock); in vgic_its_ctrl()
2665 mutex_unlock(&kvm->lock); in vgic_its_ctrl()
2666 return -EBUSY; in vgic_its_ctrl()
2669 mutex_lock(&kvm->arch.config_lock); in vgic_its_ctrl()
2670 mutex_lock(&its->its_lock); in vgic_its_ctrl()
2677 ret = abi->save_tables(its); in vgic_its_ctrl()
2680 ret = abi->restore_tables(its); in vgic_its_ctrl()
2684 mutex_unlock(&its->its_lock); in vgic_its_ctrl()
2685 mutex_unlock(&kvm->arch.config_lock); in vgic_its_ctrl()
2687 mutex_unlock(&kvm->lock); in vgic_its_ctrl()
2692 * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
2703 struct vgic_dist *dist = &kvm->arch.vgic; in kvm_arch_allow_write_without_running_vcpu()
2705 return dist->table_write_in_progress; in kvm_arch_allow_write_without_running_vcpu()
2711 struct vgic_its *its = dev->private; in vgic_its_set_attr()
2714 switch (attr->group) { in vgic_its_set_attr()
2716 u64 __user *uaddr = (u64 __user *)(long)attr->addr; in vgic_its_set_attr()
2717 unsigned long type = (unsigned long)attr->attr; in vgic_its_set_attr()
2721 return -ENODEV; in vgic_its_set_attr()
2724 return -EFAULT; in vgic_its_set_attr()
2726 ret = vgic_check_iorange(dev->kvm, its->vgic_its_base, in vgic_its_set_attr()
2731 return vgic_register_its_iodev(dev->kvm, its, addr); in vgic_its_set_attr()
2734 return vgic_its_ctrl(dev->kvm, its, attr->attr); in vgic_its_set_attr()
2736 u64 __user *uaddr = (u64 __user *)(long)attr->addr; in vgic_its_set_attr()
2740 return -EFAULT; in vgic_its_set_attr()
2745 return -ENXIO; in vgic_its_set_attr()
2751 switch (attr->group) { in vgic_its_get_attr()
2753 struct vgic_its *its = dev->private; in vgic_its_get_attr()
2754 u64 addr = its->vgic_its_base; in vgic_its_get_attr()
2755 u64 __user *uaddr = (u64 __user *)(long)attr->addr; in vgic_its_get_attr()
2756 unsigned long type = (unsigned long)attr->attr; in vgic_its_get_attr()
2759 return -ENODEV; in vgic_its_get_attr()
2762 return -EFAULT; in vgic_its_get_attr()
2766 u64 __user *uaddr = (u64 __user *)(long)attr->addr; in vgic_its_get_attr()
2776 return -ENXIO; in vgic_its_get_attr()
2783 .name = "kvm-arm-vgic-its",