/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

struct vhost_worker {
	struct vhost_task	*vtsk;
	struct vhost_dev	*dev;
	/* Used to serialize device wide flushing with worker swapping. */
	struct mutex		mutex;
	struct llist_head	work_list;
	u64			kcov_handle;
	u32			id;
	int			attachment_cnt;
	bool			killed;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
	struct vhost_virtqueue	*vq;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);
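
/*
 * Typical backend wiring (illustrative sketch, not part of this API;
 * handle_tx_kick and eventfd_file are hypothetical names):
 *
 *	static void handle_tx_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		// drain and process the ring here
 *	}
 *
 *	vhost_poll_init(&vq->poll, handle_tx_kick, EPOLLIN, dev, vq);
 *	vhost_poll_start(&vq->poll, eventfd_file);	// arm on the kick fd
 *	...
 *	vhost_poll_stop(&vq->poll);	// disarm
 *	vhost_dev_flush(dev);		// wait for queued work to finish
 */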

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;
	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
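
/*
 * Illustrative sketch of the last_avail_idx/last_used_idx encoding noted
 * above (packed-ring case only; these are not helpers from this header):
 *
 *	u16 idx = vq->last_avail_idx & 0x7fff;	// 15-bit ring index
 *	bool wrap = vq->last_avail_idx >> 15;	// wrap counter in high bit
 */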

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);
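
/*
 * Typical device lifecycle (illustrative sketch; vqs/nvqs and the weights
 * are the caller's, and error handling is elided):
 *
 *	vhost_dev_init(dev, vqs, nvqs, UIO_MAXIOV, weight, byte_weight,
 *		       true, NULL);
 *	vhost_dev_set_owner(dev);	// on VHOST_SET_OWNER
 *	...
 *	vhost_dev_stop(dev);
 *	vhost_dev_cleanup(dev);
 */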

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
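
/*
 * Canonical handle_kick processing loop built from the helpers above
 * (illustrative sketch; error paths and byte accounting elided):
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head == vq->num) {	// ring drained
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	// raced with a new buffer
 *			}
 *			break;
 *		}
 *		// consume iov[0 .. out + in) ...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len)))
 *			break;		// yield to avoid starving others
 *	}
 */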

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
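
/*
 * Illustrative sketch of posting an IOTLB miss to userspace with the
 * message helpers above (field names follow struct vhost_msg_v2 from the
 * vhost UAPI; locking elided):
 *
 *	node = vhost_new_msg(vq, VHOST_IOTLB_MSG_V2);
 *	node->msg_v2.iotlb.type = VHOST_IOTLB_MISS;
 *	node->msg_v2.iotlb.iova = iova;
 *	node->msg_v2.iotlb.perm = access;
 *	vhost_enqueue_msg(dev, &dev->read_list, node);
 */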

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx);	\
	} while (0)
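
/*
 * Usage example (message text illustrative):
 *
 *	vq_err(vq, "Failed to access avail idx at %p\n", &vq->avail->idx);
 */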

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:            Virtqueue.
 * @private_data:  The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:  Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
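
/*
 * Usage sketch: backend set/get under the vq mutex (sock here stands for
 * whatever private object the backend uses):
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, sock);
 *	...
 *	sock = vhost_vq_get_backend(vq);
 *	mutex_unlock(&vq->mutex);
 */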

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
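
/*
 * Example: branching on a negotiated feature bit (bit values come from
 * the virtio/vhost UAPI headers):
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		// interrupt suppression via used_event/avail_event
 */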

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
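
/*
 * Usage sketch: ring fields are __virtio16/32/64 on the wire and must be
 * converted through these accessors (raw_idx stands for a value already
 * copied from the guest-visible avail ring):
 *
 *	u16 idx = vhost16_to_cpu(vq, raw_idx);
 *	__virtio32 wire = cpu_to_vhost32(vq, cpu_val);
 */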
#endif /* _VHOST_H */