// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"

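/*
 * Called on the first open of the device: build the region table from
 * the CDX device's resources, reset the device to a known state and
 * record whether bus master enable (BME) control is supported.
 */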
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i, ret;

	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	ret = cdx_dev_reset(core_vdev->dev);
	if (ret) {
		kfree(vdev->regions);
		vdev->regions = NULL;
		return ret;
	}
	ret = cdx_clear_master(cdx_dev);
	if (ret)
		vdev->flags &= ~BME_SUPPORT;
	else
		vdev->flags |= BME_SUPPORT;

	return 0;
}

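/*
 * Called on the last close of the device: free the region table, reset
 * the device so it is quiesced for the next user and tear down any IRQ
 * state configured while the device was open.
 */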
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
	vfio_cdx_irqs_cleanup(vdev);
}

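/*
 * VFIO_DEVICE_FEATURE_BUS_MASTER handler: lets userspace set or clear
 * bus mastering on the CDX device. Only the SET direction of the
 * feature ioctl is accepted, and only if BME support was detected at
 * open time.
 */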
static int vfio_cdx_bm_ctrl(struct vfio_device *core_vdev, u32 flags,
			    void __user *arg, size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_bus_master, op);
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	struct vfio_device_feature_bus_master ops;
	int ret;

	if (!(vdev->flags & BME_SUPPORT))
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(ops));
	if (ret != 1)
		return ret;

	if (copy_from_user(&ops, arg, minsz))
		return -EFAULT;

	switch (ops.op) {
	case VFIO_DEVICE_FEATURE_CLEAR_MASTER:
		return cdx_clear_master(cdx_dev);
	case VFIO_DEVICE_FEATURE_SET_MASTER:
		return cdx_set_master(cdx_dev);
	default:
		return -EINVAL;
	}
}

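/* Dispatch device features; only BUS_MASTER is implemented. */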
static int vfio_cdx_ioctl_feature(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_BUS_MASTER:
		return vfio_cdx_bm_ctrl(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}

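/*
 * VFIO_DEVICE_GET_INFO: report the CDX device flavor, reset support,
 * the number of MMIO regions and a single IRQ index when the device
 * has MSIs.
 */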
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = cdx_dev->num_msi ? 1 : 0;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

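/*
 * VFIO_DEVICE_GET_REGION_INFO: translate a region index into the fixed
 * per-region file offset and report the region's size and access flags.
 */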
static int vfio_cdx_ioctl_get_region_info(struct vfio_cdx_device *vdev,
					  struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

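/*
 * VFIO_DEVICE_GET_IRQ_INFO: a single IRQ index (0) covers all of the
 * device's MSIs; each interrupt can be backed by an eventfd.
 */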
static int vfio_cdx_ioctl_get_irq_info(struct vfio_cdx_device *vdev,
				       struct vfio_irq_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= 1)
		return -EINVAL;

	if (!cdx_dev->num_msi)
		return -EINVAL;

	info.flags = VFIO_IRQ_INFO_EVENTFD | VFIO_IRQ_INFO_NORESIZE;
	info.count = cdx_dev->num_msi;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

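/*
 * VFIO_DEVICE_SET_IRQS: validate the header against the device's MSI
 * count, copy in any trailing payload from userspace and hand off to
 * the IRQ handling code.
 */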
static int vfio_cdx_ioctl_set_irqs(struct vfio_cdx_device *vdev,
				   struct vfio_irq_set __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret = 0;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, cdx_dev->num_msi,
						 1, &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user(arg->data, data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	ret = vfio_cdx_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
				      hdr.start, hdr.count, data);
	kfree(data);

	return ret;
}

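/* Top-level ioctl dispatcher for vfio-cdx devices. */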
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_cdx_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_GET_IRQ_INFO:
		return vfio_cdx_ioctl_get_irq_info(vdev, uarg);
	case VFIO_DEVICE_SET_IRQS:
		return vfio_cdx_ioctl_set_irqs(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

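/*
 * Map one MMIO region into a vma. The low bits of vm_pgoff select a
 * page offset within the region; the mapping must not extend past the
 * end of the region.
 */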
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

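/*
 * mmap entry point: decode the region index from the file offset and
 * enforce the region's MMAP/READ/WRITE flags before mapping it.
 */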
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.device_feature = vfio_cdx_ioctl_feature,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};

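/*
 * Driver probe: allocate the vfio_cdx_device wrapping the core
 * vfio_device and register it with the VFIO core.
 */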
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

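/* Driver remove: unregister from the VFIO core and drop the device reference. */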
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name	= "vfio-cdx",
	},
	.driver_managed_dma = true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
MODULE_IMPORT_NS(CDX_BUS);