/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_NEW_H__
#define __KVM_ARM_VGIC_NEW_H__

#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define INTERRUPT_ID_BITS_SPIS	10
#define INTERRUPT_ID_BITS_ITS	16
#define VGIC_LPI_MAX_INTID	((1 << INTERRUPT_ID_BITS_ITS) - 1)
#define VGIC_PRI_BITS		5

#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

#define VGIC_AFFINITY_0_SHIFT 0
#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
#define VGIC_AFFINITY_1_SHIFT 8
#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
#define VGIC_AFFINITY_2_SHIFT 16
#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
#define VGIC_AFFINITY_3_SHIFT 24
#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)

#define VGIC_AFFINITY_LEVEL(reg, level) \
	((((reg) & VGIC_AFFINITY_## level ##_MASK) \
	>> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
/*
 * Userspace encodes the affinity in a different layout from MPIDR_EL1;
 * the macro below converts the vgic userspace format to the MPIDR
 * register format.
 */
#define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
			    VGIC_AFFINITY_LEVEL(val, 1) | \
			    VGIC_AFFINITY_LEVEL(val, 2) | \
			    VGIC_AFFINITY_LEVEL(val, 3))
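/*
 * Worked example: MPIDR_EL1 keeps Aff3 in bits [39:32], while the
 * userspace layout packs all four affinity levels into bits [31:0].
 * A userspace value of 0xaabbccdd (Aff3=0xaa, Aff2=0xbb, Aff1=0xcc,
 * Aff0=0xdd) therefore converts as:
 *
 *	VGIC_TO_MPIDR(0xaabbccdd) == 0x000000aa00bbccddUL
 */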

/*
 * The macros below define the CPUREG encoding (the Op0/Op1/CRn/CRm/Op2
 * fields of a system register), as described in
 * Documentation/virt/kvm/devices/arm-vgic-v3.rst.
 */
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK   0x000000000000c000
#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT  14
#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK   0x0000000000003800
#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT  11
#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK   0x0000000000000780
#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT  7
#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK   0x0000000000000078
#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT  3
#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK   0x0000000000000007
#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT  0

#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
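/*
 * For example, ICC_PMR_EL1 (Op0=3, Op1=0, CRn=4, CRm=6, Op2=0) encodes
 * as (3 << 14) | (4 << 7) | (6 << 3) == 0xc230 in this field layout.
 */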

/*
 * The macros below define the ITS table entry encoding, as described
 * in Documentation/virt/kvm/devices/arm-vgic-its.rst.
 */
#define KVM_ITS_CTE_VALID_SHIFT		63
#define KVM_ITS_CTE_VALID_MASK		BIT_ULL(63)
#define KVM_ITS_CTE_RDBASE_SHIFT	16
#define KVM_ITS_CTE_ICID_MASK		GENMASK_ULL(15, 0)
#define KVM_ITS_ITE_NEXT_SHIFT		48
#define KVM_ITS_ITE_PINTID_SHIFT	16
#define KVM_ITS_ITE_PINTID_MASK		GENMASK_ULL(47, 16)
#define KVM_ITS_ITE_ICID_MASK		GENMASK_ULL(15, 0)
#define KVM_ITS_DTE_VALID_SHIFT		63
#define KVM_ITS_DTE_VALID_MASK		BIT_ULL(63)
#define KVM_ITS_DTE_NEXT_SHIFT		49
#define KVM_ITS_DTE_NEXT_MASK		GENMASK_ULL(62, 49)
#define KVM_ITS_DTE_ITTADDR_SHIFT	5
#define KVM_ITS_DTE_ITTADDR_MASK	GENMASK_ULL(48, 5)
#define KVM_ITS_DTE_SIZE_MASK		GENMASK_ULL(4, 0)
#define KVM_ITS_L1E_VALID_MASK		BIT_ULL(63)
/* we only support 64 kB translation table page size */
#define KVM_ITS_L1E_ADDR_MASK		GENMASK_ULL(51, 16)
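/*
 * Illustration: a valid collection table entry mapping collection
 * 'icid' to redistributor 'rdbase' would be assembled as:
 *
 *	u64 cte = KVM_ITS_CTE_VALID_MASK |
 *		  ((u64)rdbase << KVM_ITS_CTE_RDBASE_SHIFT) |
 *		  (icid & KVM_ITS_CTE_ICID_MASK);
 */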
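/*
 * Layout of the KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attribute: region
 * index in bits [11:0], flags in bits [15:12], bits [51:16] of the
 * guest physical base address in bits [51:16] and the number of
 * redistributors in bits [63:52].
 */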
#define KVM_VGIC_V3_RDIST_INDEX_MASK	GENMASK_ULL(11, 0)
#define KVM_VGIC_V3_RDIST_FLAGS_MASK	GENMASK_ULL(15, 12)
#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT	12
#define KVM_VGIC_V3_RDIST_BASE_MASK	GENMASK_ULL(51, 16)
#define KVM_VGIC_V3_RDIST_COUNT_MASK	GENMASK_ULL(63, 52)
#define KVM_VGIC_V3_RDIST_COUNT_SHIFT	52

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

static inline u32 vgic_get_implementation_rev(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.vgic.implementation_rev;
}

/* Requires the irq_lock to be held by the caller. */
static inline bool irq_is_pending(struct vgic_irq *irq)
{
	if (irq->config == VGIC_CONFIG_EDGE)
		return irq->pending_latch;
	else
		return irq->pending_latch || irq->line_level;
}

static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
{
	return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
}

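/*
 * On GICv2, an SGI can be pending from several source CPUs at the same
 * time; each pending source then needs its own list register.
 */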
static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
{
	/* Account for the active state as an interrupt */
	if (vgic_irq_is_sgi(irq->intid) && irq->source)
		return hweight8(irq->source) + irq->active;

	return irq_is_pending(irq) || irq->active;
}

static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
{
	return vgic_irq_get_lr_count(irq) > 1;
}

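/*
 * Wrapper around kvm_write_guest_lock() that marks the distributor as
 * having a table write in progress while guest ITS/pending tables are
 * being written back to guest memory.
 */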
static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
					const void *data, unsigned long len)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret;

	dist->table_write_in_progress = true;
	ret = kvm_write_guest_lock(kvm, gpa, data, len);
	dist->table_write_in_progress = false;

	return ret;
}

/*
 * This struct provides an intermediate representation of the fields
 * contained in the GICH_VMCR and ICH_VMCR registers, such that code
 * exporting the GIC state to userspace can generate either GICv2 or
 * GICv3 CPU interface registers regardless of the hardware-backed GIC
 * used.
 */
struct vgic_vmcr {
	u32	grpen0;
	u32	grpen1;

	u32	ackctl;
	u32	fiqen;
	u32	cbpr;
	u32	eoim;

	u32	abpr;
	u32	bpr;
	u32	pmr;  /* Priority mask field in the GICC_PMR and
		       * ICC_PMR_EL1 priority field format */
};

struct vgic_reg_attr {
	struct kvm_vcpu *vcpu;
	gpa_t addr;
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr);
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags) __releases(&irq->irq_lock);
void vgic_kick_vcpus(struct kvm *kvm);
void vgic_irq_handle_resampling(struct vgic_irq *irq,
				bool lr_deactivated, bool lr_pending);

int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size);

void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val);
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_enable(struct kvm_vcpu *vcpu);
int vgic_v2_probe(const struct gic_kvm_info *info);
int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type);

void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);

void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);

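/*
 * SGIs, PPIs and SPIs are statically allocated, so only LPIs (which
 * are allocated and freed at runtime) actually need reference
 * counting here.
 */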
static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
{
	if (!irq)
		return false;

	if (irq->intid < VGIC_MIN_LPI)
		return true;

	return kref_get_unless_zero(&irq->refcount);
}

static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
	WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
}

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);

bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr, bool is_write);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u32 *val);
int kvm_register_vgic_device(unsigned long type);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
int vgic_lazy_init(struct kvm *kvm);
int vgic_init(struct kvm *kvm);

void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);

static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;

	/*
	 * num_pri_bits is initialized with the value supported by the
	 * hardware, so we can safely rely on it even if the VM has not
	 * restored ICC_CTLR_EL1 before restoring the APnR registers.
	 */
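	/*
	 * With N priority bits there are 1 << (N - 5) active priority
	 * registers per group, so the highest valid APnR index is
	 * (1 << (N - 5)) - 1: 7 bits -> 3, 6 bits -> 1, 5 bits -> 0.
	 */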
	switch (cpu_if->num_pri_bits) {
	case 7: return 3;
	case 6: return 1;
	default: return 0;
	}
}

static inline bool
vgic_v3_redist_region_full(struct vgic_redist_region *region)
{
	if (!region->count)
		return false;

	return (region->free_index >= region->count);
}

struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);

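/*
 * A rdreg->count of zero denotes a legacy (KVM_VGIC_V3_ADDR_TYPE_REDIST)
 * region, whose size follows the number of online vcpus rather than a
 * fixed redistributor count.
 */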
static inline size_t
vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
	if (!rdreg->count)
		return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
	else
		return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index);
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);

bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

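/* Check whether [base, base + size) overlaps the distributor frame. */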
static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;

	return (base + size > d->vgic_dist_base) &&
		(base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
}

bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
void vgic_its_invalidate_all_caches(struct kvm *kvm);

/* GICv4.1 MMIO interface */
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
int vgic_its_invall(struct kvm_vcpu *vcpu);

bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);

void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu);

static inline bool kvm_has_gicv3(struct kvm *kvm)
{
	return kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP);
}

#endif