/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
#endif
#include <asm/barrier.h>

/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Use user's VA */
	bool use_va;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* IOTLB for this vring */
	struct vhost_iotlb *iotlb;

	/* spinlock to synchronize IOTLB accesses */
	spinlock_t *iotlb_lock;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};

struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);

/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};
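
/*
 * Illustrative sketch (not part of this header): a virtio driver that
 * needs a host-side ring can use these ops from its probe routine, much
 * as caif_virtio does. "my_cb" is a hypothetical callback and error
 * handling is abbreviated:
 *
 *	static vrh_callback_t my_cb;
 *	struct vringh *vrh;
 *	vrh_callback_t *cbs[] = { my_cb };
 *	int err;
 *
 *	err = vdev->vringh_config->find_vrhs(vdev, 1, &vrh, cbs);
 *	if (err)
 *		return err;
 *	...
 *	vdev->vringh_config->del_vrhs(vdev);
 */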

/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	u64 start, end_incl;
	u64 offset;
};
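
/*
 * A getrange callback for vringh_getdesc_user() fills one of these in
 * for the range containing @addr; @offset is then applied to guest
 * addresses in that range. Minimal sketch, assuming a single guest
 * region; GUEST_BASE, GUEST_SIZE and USER_OFFSET are hypothetical:
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		if (addr < GUEST_BASE || addr >= GUEST_BASE + GUEST_SIZE)
 *			return false;
 *		r->start = GUEST_BASE;
 *		r->end_incl = GUEST_BASE + GUEST_SIZE - 1;
 *		r->offset = USER_OFFSET;
 *		return true;
 *	}
 */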

/**
 * struct vringh_iov - iovec mangler.
 * @iov: array of iovecs to operate on
 * @consumed: number of bytes consumed within iov[i]
 * @i: index of current iovec
 * @used: number of iovecs present in @iov
 * @max_num: maximum number of iovecs.
 *           corresponds to allocated memory of @iov
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/**
 * struct vringh_kiov - kvec mangler.
 * @iov: array of iovecs to operate on
 * @consumed: number of bytes consumed within iov[i]
 * @i: index of current iovec
 * @used: number of iovecs present in @iov
 * @max_num: maximum number of iovecs.
 *           corresponds to allocated memory of @iov
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
				   struct iovec *iovec, unsigned num)
{
	iov->used = iov->i = 0;
	iov->consumed = 0;
	iov->max_num = num;
	iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
	iov->iov[iov->i].iov_len += iov->consumed;
	iov->iov[iov->i].iov_base -= iov->consumed;
	iov->consumed = 0;
	iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
	if (iov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(iov->iov);
	iov->max_num = iov->used = iov->i = iov->consumed = 0;
	iov->iov = NULL;
}
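
/*
 * Usage sketch: callers typically hand in a stack array; if a
 * descriptor chain needs more entries, the core helpers replace it
 * with a kmalloc'ed array (flagged via VRINGH_IOV_ALLOCATED), which
 * vringh_iov_cleanup() then frees. vringh_iov_reset() rewinds a
 * partially consumed iovec so the same buffers can be replayed.
 *
 *	struct iovec stack_iov[16];
 *	struct vringh_iov riov;
 *
 *	vringh_iov_init(&riov, stack_iov, ARRAY_SIZE(stack_iov));
 *	...
 *	vringh_iov_cleanup(&riov);
 */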

/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
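
/*
 * Putting the userspace helpers together, a service loop might look
 * like this (sketch only; "my_getrange" and "service" are hypothetical
 * and error handling is abbreviated). vringh_getdesc_user() returns 1
 * when a descriptor was found and 0 when the ring was empty:
 *
 *	u16 head;
 *	int err;
 *
 *	err = vringh_getdesc_user(vrh, &riov, &wiov, my_getrange, &head);
 *	if (err == 1) {
 *		u32 written = service(&riov, &wiov);
 *
 *		vringh_complete_user(vrh, head, written);
 *		if (vringh_need_notify_user(vrh) > 0)
 *			vringh_notify(vrh);
 *	} else if (err == 0 && !vringh_notify_enable_user(vrh)) {
 *		... more buffers appeared meanwhile; poll again ...
 *	}
 */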

/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
				    struct kvec *kvec, unsigned num)
{
	kiov->used = kiov->i = 0;
	kiov->consumed = 0;
	kiov->max_num = num;
	kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
	kiov->iov[kiov->i].iov_len += kiov->consumed;
	kiov->iov[kiov->i].iov_base -= kiov->consumed;
	kiov->consumed = 0;
	kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
	if (kiov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(kiov->iov);
	kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
	kiov->iov = NULL;
}

static inline size_t vringh_kiov_length(struct vringh_kiov *kiov)
{
	size_t len = 0;
	int i;

	for (i = kiov->i; i < kiov->used; i++)
		len += kiov->iov[i].iov_len;

	return len;
}

void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len);

int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

int vringh_need_notify_kern(struct vringh *vrh);
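
/*
 * A minimal kernel-side consumer (sketch; "buf" is a hypothetical
 * destination). Initializing the kiovs with a NULL array and zero
 * length lets vringh_getdesc_kern() allocate what it needs, the same
 * pattern caif_virtio uses:
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *
 *	err = vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL);
 *	if (err == 1) {
 *		if (vringh_iov_pull_kern(&riov, buf, sizeof(buf)) >= 0)
 *			vringh_complete_kern(vrh, head, 0);
 *		if (vringh_need_notify_kern(vrh) > 0)
 *			vringh_notify(vrh);
 *	}
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */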

/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
	if (vrh->notify)
		vrh->notify(vrh);
}

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
	return vrh->little_endian ||
		virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
	return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
	return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
	return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
	return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
	return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
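
/*
 * These accessors cover both legacy devices and those negotiating
 * VIRTIO_F_VERSION_1, so host-side code never needs an explicit
 * endianness check. For a kernel-addressable ring, e.g. (sketch):
 *
 *	u16 avail = vringh16_to_cpu(vrh, vrh->vring.avail->idx);
 */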

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
		      spinlock_t *iotlb_lock);

int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used);

int vringh_init_iotlb_va(struct vringh *vrh, u64 features,
			 unsigned int num, bool weak_barriers,
			 struct vring_desc *desc,
			 struct vring_avail *avail,
			 struct vring_used *used);

int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp);

ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len);
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len);

void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);

int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_iotlb(struct vringh *vrh);
void vringh_notify_disable_iotlb(struct vringh *vrh);

int vringh_need_notify_iotlb(struct vringh *vrh);
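
/*
 * Typical IOTLB usage (sketch, loosely following the vDPA simulator):
 * attach the IOTLB, initialise the ring, then use only the _iotlb
 * variants, which translate ring and buffer addresses through it:
 *
 *	vringh_set_iotlb(vrh, iotlb, &iotlb_lock);
 *	err = vringh_init_iotlb(vrh, features, num, weak_barriers,
 *				desc, avail, used);
 *	...
 *	err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_ATOMIC);
 */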

#endif /* CONFIG_VHOST_IOTLB */

#endif /* _LINUX_VRINGH_H */