1  /* SPDX-License-Identifier: GPL-2.0-only */
2  /*
3   * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4   * Copyright (C) 2022 Ventana Micro Systems Inc.
5   *
6   * Authors:
7   *	Anup Patel <apatel@ventanamicro.com>
8   */
9  
10  #ifndef __KVM_RISCV_AIA_H
11  #define __KVM_RISCV_AIA_H
12  
13  #include <linux/jump_label.h>
14  #include <linux/kvm_types.h>
15  #include <asm/csr.h>
16  
/*
 * Per-VM AIA (Advanced Interrupt Architecture) state: in-kernel irqchip
 * status/mode, the geometry used to decode guest IMSIC addresses, and
 * the emulated APLIC.
 */
struct kvm_aia {
	/* In-kernel irqchip created */
	bool		in_kernel;

	/* In-kernel irqchip initialized */
	bool		initialized;

	/* Virtualization mode (Emulation, HW Accelerated, or Auto) */
	u32		mode;

	/* Number of MSIs */
	u32		nr_ids;

	/* Number of wired IRQs */
	u32		nr_sources;

	/* Number of group bits in IMSIC address */
	u32		nr_group_bits;

	/* Position of group bits in IMSIC address */
	u32		nr_group_shift;

	/* Number of hart bits in IMSIC address */
	u32		nr_hart_bits;

	/* Number of guest bits in IMSIC address */
	u32		nr_guest_bits;

	/* Guest physical address of APLIC */
	gpa_t		aplic_addr;

	/* Internal state of APLIC */
	void		*aplic_state;
};
51  
/*
 * Saved AIA CSR context of a Guest VCPU. Per AIA naming convention the
 * members ending in 'h' are the high halves of the corresponding CSRs,
 * which exist only on RV32 (see the CONFIG_32BIT flush/sync helpers below).
 */
struct kvm_vcpu_aia_csr {
	unsigned long vsiselect;	/* VS-mode indirect register select */
	unsigned long hviprio1;		/* VS-level interrupt priorities (part 1) */
	unsigned long hviprio2;		/* VS-level interrupt priorities (part 2) */
	unsigned long vsieh;		/* High half of vsie (RV32 only) */
	unsigned long hviph;		/* High half of hvip (RV32 only) */
	unsigned long hviprio1h;	/* High half of hviprio1 (RV32 only) */
	unsigned long hviprio2h;	/* High half of hviprio2 (RV32 only) */
};
61  
/* Per-VCPU AIA state: CSR context plus the VCPU's emulated IMSIC. */
struct kvm_vcpu_aia {
	/* CPU AIA CSR context of Guest VCPU */
	struct kvm_vcpu_aia_csr guest_csr;

	/* CPU AIA CSR context upon Guest VCPU reset */
	struct kvm_vcpu_aia_csr guest_reset_csr;

	/* Guest physical address of IMSIC for this VCPU */
	gpa_t		imsic_addr;

	/* HART index of IMSIC extracted from guest physical address */
	u32		hart_index;

	/* Internal state of IMSIC for this VCPU */
	void		*imsic_state;
};
78  
/* Sentinel for a not-yet-configured APLIC/IMSIC guest physical address */
#define KVM_RISCV_AIA_UNDEF_ADDR	(-1)

/* True once the in-kernel AIA irqchip of VM "k" has been initialized */
#define kvm_riscv_aia_initialized(k)	((k)->arch.aia.initialized)

/* True if VM "k" has an in-kernel irqchip */
#define irqchip_in_kernel(k)		((k)->arch.aia.in_kernel)

/* Number of usable guest external interrupts (HGEI); see alloc/free below */
extern unsigned int kvm_riscv_aia_nr_hgei;
/* Maximum number of MSI identities available to a guest */
extern unsigned int kvm_riscv_aia_max_ids;
/* Static key enabled when the host supports AIA; gates all AIA handling */
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
	static_branch_unlikely(&kvm_riscv_aia_available)
92  
/* In-kernel IMSIC emulation (per-VCPU MSI interrupt file) */
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);

/* Pseudo iselect value used to access the TOPEI register via rmw */
#define KVM_RISCV_AIA_IMSIC_TOPEI	(ISELECT_MASK + 1)
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
				 unsigned long *val, unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
				bool write, unsigned long *val);
int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
				    u32 guest_index, u32 offset, u32 iid);
int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);

/* In-kernel APLIC emulation (per-VM wired interrupt controller) */
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
int kvm_riscv_aia_aplic_init(struct kvm *kvm);
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
115  
#ifdef CONFIG_32BIT
/*
 * On RV32 the high-half AIA CSR state (e.g. hviph in kvm_vcpu_aia_csr)
 * must be explicitly flushed to / synced from hardware around guest
 * entry and exit.
 */
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
#else
/* No high-half CSRs on 64-bit, so flush/sync are no-ops */
static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
}
#endif
/* Per-VCPU AIA interrupt delivery and CSR access */
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);

void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val);
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val);

/* Read-modify-write handlers for guest accesses to AIA CSRs */
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
/* Entries for the VCPU CSR emulation dispatch table */
#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
{ .base = CSR_SIREG,      .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI,     .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },

/* Per-VCPU AIA lifecycle */
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
155  
/* VM-level interrupt injection (MSIs and wired IRQs) */
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid);
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);

/* VM-level AIA lifecycle */
void kvm_riscv_aia_init_vm(struct kvm *kvm);
void kvm_riscv_aia_destroy_vm(struct kvm *kvm);

/* Allocation of guest external interrupts (HGEI) on a host CPU */
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);

/* Module-level AIA setup/teardown and per-CPU enable/disable */
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
int kvm_riscv_aia_init(void);
void kvm_riscv_aia_exit(void);
173  
174  #endif
175