// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bits.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

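/*
 * Helpers to acquire and release the mutex of every VCPU of a VM.
 * lock_all_vcpus() uses mutex_trylock() so that it fails fast instead
 * of sleeping, and on failure unlock_vcpus() releases the mutexes in
 * reverse order, starting from the last VCPU that was locked.
 */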
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

static void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

static bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	unsigned long c;

	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}

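/*
 * KVM device "create" callback for the in-kernel AIA irqchip. Creation
 * fails with -EEXIST if an irqchip already exists and with -EBUSY if
 * any VCPU has already run; all VCPU mutexes are held across the
 * ran_atleast_once check so the result cannot race with a VCPU entry.
 */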
static int aia_create(struct kvm_device *dev, u32 type)
{
	int ret;
	unsigned long i;
	struct kvm *kvm = dev->kvm;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	ret = -EBUSY;
	if (!lock_all_vcpus(kvm))
		return ret;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.ran_atleast_once)
			goto out_unlock;
	}
	ret = 0;

	kvm->arch.aia.in_kernel = true;

out_unlock:
	unlock_all_vcpus(kvm);
	return ret;
}

static void aia_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

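/*
 * Read or write one KVM_DEV_RISCV_AIA_GRP_CONFIG attribute. All writes
 * are validated against the limits advertised by the host AIA driver
 * and are rejected with -EBUSY once the irqchip has been initialized.
 */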
static int aia_config(struct kvm *kvm, unsigned long type,
		      u32 *nr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	/* Writes can only be done before irqchip is initialized */
	if (write && kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	switch (type) {
	case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		if (write) {
			switch (*nr) {
			case KVM_DEV_RISCV_AIA_MODE_EMUL:
				break;
			case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
			case KVM_DEV_RISCV_AIA_MODE_AUTO:
				/*
				 * HW acceleration and auto modes are
				 * supported only on hosts with a non-zero
				 * number of guest external interrupts
				 * (i.e. non-zero VS-level IMSIC pages).
				 */
				if (!kvm_riscv_aia_nr_hgei)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
			aia->mode = *nr;
		} else
			*nr = aia->mode;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
			    ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
			     KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (kvm_riscv_aia_max_ids <= *nr))
				return -EINVAL;
			aia->nr_ids = *nr;
		} else
			*nr = aia->nr_ids;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		if (write) {
			if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
			    (*nr >= kvm_riscv_aia_max_ids))
				return -EINVAL;
			aia->nr_sources = *nr;
		} else
			*nr = aia->nr_sources;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
				return -EINVAL;
			aia->nr_group_bits = *nr;
		} else
			*nr = aia->nr_group_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
				return -EINVAL;
			aia->nr_group_shift = *nr;
		} else
			*nr = aia->nr_group_shift;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
				return -EINVAL;
			aia->nr_hart_bits = *nr;
		} else
			*nr = aia->nr_hart_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
				return -EINVAL;
			aia->nr_guest_bits = *nr;
		} else
			*nr = aia->nr_guest_bits;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

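/*
 * Read or write the guest physical base address of the APLIC. Writes
 * must be suitably aligned and can only happen before the irqchip is
 * initialized.
 */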
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
			return -EINVAL;

		aia->aplic_addr = *addr;
	} else
		*addr = aia->aplic_addr;

	return 0;
}

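/*
 * Read or write the guest physical base address of the IMSIC of a
 * given VCPU. The per-VCPU address is protected by the VCPU mutex.
 */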
static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
			  unsigned long vcpu_idx, bool write)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vcpu_aia;

	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	if (!vcpu)
		return -EINVAL;
	vcpu_aia = &vcpu->arch.aia_context;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
			return -EINVAL;
	}

	mutex_lock(&vcpu->mutex);
	if (write)
		vcpu_aia->imsic_addr = *addr;
	else
		*addr = vcpu_aia->imsic_addr;
	mutex_unlock(&vcpu->mutex);

	return 0;
}

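/*
 * Compute the common base PPN of an IMSIC address by masking out the
 * page offset together with the guest, hart, and group index bits.
 * All IMSICs of a VM must decode to the same base PPN.
 */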
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
	u32 h, l;
	gpa_t mask = 0;

	h = aia->nr_hart_bits + aia->nr_guest_bits +
	    IMSIC_MMIO_PAGE_SHIFT - 1;
	mask = GENMASK_ULL(h, 0);

	if (aia->nr_group_bits) {
		h = aia->nr_group_bits + aia->nr_group_shift - 1;
		l = aia->nr_group_shift;
		mask |= GENMASK_ULL(h, l);
	}

	return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
}

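/*
 * Recover the HART index encoded in an IMSIC address: the hart index
 * bits sit above the guest index bits within a group, and the group
 * index bits sit at nr_group_shift.
 */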
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
	u32 hart = 0, group = 0;

	if (aia->nr_hart_bits)
		hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
		       GENMASK_ULL(aia->nr_hart_bits - 1, 0);
	if (aia->nr_group_bits)
		group = (addr >> aia->nr_group_shift) &
			GENMASK_ULL(aia->nr_group_bits - 1, 0);

	return (group << aia->nr_hart_bits) | hart;
}

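/*
 * Finalize the irqchip (KVM_DEV_RISCV_AIA_CTRL_INIT): validate the
 * configured addresses, derive the HART index of each VCPU from its
 * IMSIC address, and initialize the APLIC and all IMSICs. On failure,
 * everything initialized so far is torn down again.
 */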
static int aia_init(struct kvm *kvm)
{
	int ret, i;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vaia;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

	/* Irqchip can be initialized only once */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* We might be in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* Number of sources should be less than or equal to number of IDs */
	if (aia->nr_ids < aia->nr_sources)
		return -EINVAL;

	/* APLIC base is required for non-zero number of sources */
	if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
		return -EINVAL;

	/* Initialize APLIC */
	ret = kvm_riscv_aia_aplic_init(kvm);
	if (ret)
		return ret;

	/* Iterate over each VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		vaia = &vcpu->arch.aia_context;

		/* IMSIC base is required */
		if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* All IMSICs should have matching base PPN */
		if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
			base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
		if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* Update HART index of the IMSIC based on IMSIC base */
		vaia->hart_index = aia_imsic_hart_index(aia,
							vaia->imsic_addr);

		/* Initialize IMSIC for this VCPU */
		ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
		if (ret)
			goto fail_cleanup_imsics;
	}

	/* Set the initialized flag */
	kvm->arch.aia.initialized = true;

	return 0;

fail_cleanup_imsics:
	for (i = idx - 1; i >= 0; i--) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
	}
	kvm_riscv_aia_aplic_cleanup(kvm);
	return ret;
}

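/*
 * KVM device "set_attr" callback: copy the attribute value from
 * user-space and dispatch on the attribute group, taking kvm->lock
 * around all state updates.
 */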
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	unsigned long v, type = (unsigned long)attr->attr;
	void __user *uaddr = (void __user *)(long)attr->addr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, true);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			    type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (type) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = aia_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			break;
		}

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
		mutex_unlock(&dev->kvm->lock);

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
		mutex_unlock(&dev->kvm->lock);

		break;
	}

	return r;
}

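/*
 * KVM device "get_attr" callback: mirror of aia_set_attr() that reads
 * the requested state and copies it back to user-space.
 */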
static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	void __user *uaddr = (void __user *)(long)attr->addr;
	unsigned long v, type = (unsigned long)attr->attr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, false);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			    type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &v, sizeof(v)))
			return -EFAULT;

		break;
	}

	return r;
}

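/*
 * KVM device "has_attr" callback: report which attributes this device
 * implements so user-space can probe before using them.
 */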
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_vcpus;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			return 0;
		else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			return 0;
		break;
	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
	}

	return -ENXIO;
}

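/*
 * A minimal sketch of the user-space side, assuming the standard KVM
 * device API (the attribute layout is documented in
 * Documentation/virt/kvm/devices/riscv-aia.rst); "aplic_base" below is
 * a placeholder address chosen by the VMM:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_RISCV_AIA };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	__u64 aplic_base = ...;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_RISCV_AIA_GRP_ADDR,
 *		.attr = KVM_DEV_RISCV_AIA_ADDR_APLIC,
 *		.addr = (__u64)&aplic_base,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.group = KVM_DEV_RISCV_AIA_GRP_CTRL;
 *	attr.attr = KVM_DEV_RISCV_AIA_CTRL_INIT;
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */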
struct kvm_device_ops kvm_riscv_aia_device_ops = {
	.name = "kvm-riscv-aia",
	.create = aia_create,
	.destroy = aia_destroy,
	.set_attr = aia_set_attr,
	.get_attr = aia_get_attr,
	.has_attr = aia_has_attr,
};

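/*
 * Called on the VCPU run path: update the IMSIC SW/HW state before
 * entering guest mode. Returns 1 (nothing to do) when the AIA has not
 * been initialized yet.
 */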
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return 1;

	/* Update the IMSIC HW state before entering guest mode */
	return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}

void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	struct kvm_vcpu_aia_csr *reset_csr =
				&vcpu->arch.aia_context.guest_reset_csr;

	if (!kvm_riscv_aia_available())
		return;
	memcpy(csr, reset_csr, sizeof(*csr));

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Reset the IMSIC context */
	kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}

int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;

	if (!kvm_riscv_aia_available())
		return 0;

	/*
	 * We don't do any memory allocations here because these will be
	 * done after the AIA device is initialized by user-space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA vcpu context */
	vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
	vaia->hart_index = vcpu->vcpu_idx;

	return 0;
}

void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Cleanup IMSIC context */
	kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}

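/*
 * Inject an MSI identified by (hart_index, guest_index, iid) into the
 * IMSIC of the matching VCPU. An MSI targeting a non-existent HART
 * index is silently dropped.
 */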
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid)
{
	unsigned long idx;
	struct kvm_vcpu *vcpu;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		if (vcpu->arch.aia_context.hart_index == hart_index)
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
							       guest_index,
							       0, iid);
	}

	return 0;
}

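/*
 * Inject an MSI described by a 64-bit target address and 32-bit data.
 * The guest index is extracted from the low nr_guest_bits of the
 * target PPN, the remaining PPN selects the VCPU whose IMSIC page
 * matches, and the page offset plus MSI data are handed to the IMSIC.
 */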
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	gpa_t tppn, ippn;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	u32 g, toff, iid = msi->data;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Convert target address to target PPN */
	tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

	/* Extract and clear Guest ID from target PPN */
	g = tppn & (BIT(aia->nr_guest_bits) - 1);
	tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		ippn = vcpu->arch.aia_context.imsic_addr >>
					IMSIC_MMIO_PAGE_SHIFT;
		if (ippn == tppn) {
			toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
							       toff, iid);
		}
	}

	return 0;
}

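/*
 * Inject a wired interrupt level change via the in-kernel APLIC.
 */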
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject interrupt level change in APLIC */
	return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}

void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (!kvm_riscv_aia_available())
		return;

	/*
	 * We don't do any memory allocations here because these will be
	 * done after the AIA device is initialized by user-space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA global context */
	aia->mode = (kvm_riscv_aia_nr_hgei) ?
		KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
	aia->nr_ids = kvm_riscv_aia_max_ids - 1;
	aia->nr_sources = 0;
	aia->nr_group_bits = 0;
	aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
	aia->nr_hart_bits = 0;
	aia->nr_guest_bits = 0;
	aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}

void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return;

	/* Cleanup APLIC context */
	kvm_riscv_aia_aplic_cleanup(kvm);
}