Lines Matching "virtio-iommu"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018-2020 Intel Corporation.
20 #include <linux/iommu.h>
77 return as->id; in iotlb_to_asid()
82 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS]; in asid_to_as()
86 if (as->id == asid) in asid_to_as()
99 return &as->iotlb; in asid_to_iotlb()
104 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS]; in vhost_vdpa_alloc_as()
110 if (asid >= v->vdpa->nas) in vhost_vdpa_alloc_as()
117 vhost_iotlb_init(&as->iotlb, 0, 0); in vhost_vdpa_alloc_as()
118 as->id = asid; in vhost_vdpa_alloc_as()
119 hlist_add_head(&as->hash_link, head); in vhost_vdpa_alloc_as()
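The matches above cover the address-space-ID (ASID) bookkeeping: each address space lives on one of the hash chains in v->as[], selected by asid % VHOST_VDPA_IOTLB_BUCKETS, and vhost_vdpa_alloc_as() refuses an ASID that already has an entry (or is >= vdpa->nas) before pushing the new one onto the bucket head. A minimal user-space sketch of the same bucket scheme, with illustrative names (as_entry, NBUCKETS) standing in for the kernel's vhost_vdpa_as and VHOST_VDPA_IOTLB_BUCKETS:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 16                      /* stand-in for VHOST_VDPA_IOTLB_BUCKETS */

struct as_entry {
        uint32_t id;                     /* the ASID */
        struct as_entry *next;           /* hash-chain link, like hlist_node */
};

static struct as_entry *buckets[NBUCKETS];

/* asid_to_as(): walk the chain selected by asid % NBUCKETS. */
static struct as_entry *asid_to_as(uint32_t asid)
{
        struct as_entry *as;

        for (as = buckets[asid % NBUCKETS]; as; as = as->next)
                if (as->id == asid)
                        return as;
        return NULL;
}

/* alloc_as(): reject duplicates, then add at the bucket head,
 * as hlist_add_head() does in the kernel. */
static struct as_entry *alloc_as(uint32_t asid)
{
        struct as_entry *as;

        if (asid_to_as(asid))
                return NULL;
        as = calloc(1, sizeof(*as));
        if (!as)
                return NULL;
        as->id = asid;
        as->next = buckets[asid % NBUCKETS];
        buckets[asid % NBUCKETS] = as;
        return as;
}

int main(void)
{
        alloc_as(0);
        alloc_as(16);                    /* same bucket as ASID 0, distinct entry */
        printf("asid 16 -> %p\n", (void *)asid_to_as(16));
        return 0;
}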
137 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_reset_map()
138 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_reset_map()
140 if (ops->reset_map) in vhost_vdpa_reset_map()
141 ops->reset_map(vdpa, asid); in vhost_vdpa_reset_map()
149 return -EINVAL; in vhost_vdpa_remove_as()
151 hlist_del(&as->hash_link); in vhost_vdpa_remove_as()
152 vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid); in vhost_vdpa_remove_as()
154 * Devices with vendor specific IOMMU may need to restore in vhost_vdpa_remove_as()
170 struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev); in handle_vq_kick()
171 const struct vdpa_config_ops *ops = v->vdpa->config; in handle_vq_kick()
173 ops->kick_vq(v->vdpa, vq - v->vqs); in handle_vq_kick()
179 struct eventfd_ctx *call_ctx = vq->call_ctx.ctx; in vhost_vdpa_virtqueue_cb()
190 struct eventfd_ctx *config_ctx = v->config_ctx; in vhost_vdpa_config_cb()
200 struct vhost_virtqueue *vq = &v->vqs[qid]; in vhost_vdpa_setup_vq_irq()
201 const struct vdpa_config_ops *ops = v->vdpa->config; in vhost_vdpa_setup_vq_irq()
202 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_setup_vq_irq()
205 if (!ops->get_vq_irq) in vhost_vdpa_setup_vq_irq()
208 irq = ops->get_vq_irq(vdpa, qid); in vhost_vdpa_setup_vq_irq()
212 if (!vq->call_ctx.ctx) in vhost_vdpa_setup_vq_irq()
215 vq->call_ctx.producer.irq = irq; in vhost_vdpa_setup_vq_irq()
216 ret = irq_bypass_register_producer(&vq->call_ctx.producer); in vhost_vdpa_setup_vq_irq()
218 dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n", in vhost_vdpa_setup_vq_irq()
219 qid, vq->call_ctx.producer.token, ret); in vhost_vdpa_setup_vq_irq()
224 struct vhost_virtqueue *vq = &v->vqs[qid]; in vhost_vdpa_unsetup_vq_irq()
226 irq_bypass_unregister_producer(&vq->call_ctx.producer); in vhost_vdpa_unsetup_vq_irq()
231 struct vdpa_device *vdpa = v->vdpa; in _compat_vdpa_reset()
234 v->suspended = false; in _compat_vdpa_reset()
236 if (v->vdev.vqs) { in _compat_vdpa_reset()
237 flags |= !vhost_backend_has_feature(v->vdev.vqs[0], in _compat_vdpa_reset()
247 v->in_batch = 0; in vhost_vdpa_reset()
253 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_bind_mm()
254 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_bind_mm()
256 if (!vdpa->use_va || !ops->bind_mm) in vhost_vdpa_bind_mm()
259 return ops->bind_mm(vdpa, v->vdev.mm); in vhost_vdpa_bind_mm()
264 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_unbind_mm()
265 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_unbind_mm()
267 if (!vdpa->use_va || !ops->unbind_mm) in vhost_vdpa_unbind_mm()
270 ops->unbind_mm(vdpa); in vhost_vdpa_unbind_mm()
275 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_device_id()
276 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_device_id()
279 device_id = ops->get_device_id(vdpa); in vhost_vdpa_get_device_id()
282 return -EFAULT; in vhost_vdpa_get_device_id()
289 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_status()
290 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_status()
293 status = ops->get_status(vdpa); in vhost_vdpa_get_status()
296 return -EFAULT; in vhost_vdpa_get_status()
303 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_status()
304 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_set_status()
306 u32 nvqs = v->nvqs; in vhost_vdpa_set_status()
311 return -EFAULT; in vhost_vdpa_set_status()
313 status_old = ops->get_status(vdpa); in vhost_vdpa_set_status()
320 return -EINVAL; in vhost_vdpa_set_status()
343 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_config_validate()
344 size_t size = vdpa->config->get_config_size(vdpa); in vhost_vdpa_config_validate()
346 if (c->len == 0 || c->off > size) in vhost_vdpa_config_validate()
347 return -EINVAL; in vhost_vdpa_config_validate()
349 if (c->len > size - c->off) in vhost_vdpa_config_validate()
350 return -E2BIG; in vhost_vdpa_config_validate()
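vhost_vdpa_config_validate() above bounds a config-space access without risking integer overflow: it first rejects c->off > size, after which size - c->off cannot underflow, so c->len > size - c->off is a safe replacement for the overflow-prone c->off + c->len > size. The same check in isolation (cfg_access is an illustrative struct, not the kernel's vhost_vdpa_config):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct cfg_access {
        uint32_t off;                    /* offset into config space */
        uint32_t len;                    /* bytes to read or write   */
};

static int config_validate(const struct cfg_access *c, uint32_t size)
{
        if (c->len == 0 || c->off > size)
                return -EINVAL;
        if (c->len > size - c->off)      /* overflow-safe off + len > size */
                return -E2BIG;
        return 0;
}

int main(void)
{
        struct cfg_access c = { .off = 8, .len = UINT32_MAX };

        /* off + len wraps in 32 bits, but the check still rejects it. */
        printf("%d\n", config_validate(&c, 64));
        return 0;
}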
358 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_config()
364 return -EFAULT; in vhost_vdpa_get_config()
366 return -EINVAL; in vhost_vdpa_get_config()
369 return -ENOMEM; in vhost_vdpa_get_config()
373 if (copy_to_user(c->buf, buf, config.len)) { in vhost_vdpa_get_config()
375 return -EFAULT; in vhost_vdpa_get_config()
385 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_config()
391 return -EFAULT; in vhost_vdpa_set_config()
393 return -EINVAL; in vhost_vdpa_set_config()
395 buf = vmemdup_user(c->buf, config.len); in vhost_vdpa_set_config()
407 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_can_suspend()
408 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_can_suspend()
410 return ops->suspend; in vhost_vdpa_can_suspend()
415 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_can_resume()
416 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_can_resume()
418 return ops->resume; in vhost_vdpa_can_resume()
423 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_has_desc_group()
424 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_has_desc_group()
426 return ops->get_vq_desc_group; in vhost_vdpa_has_desc_group()
431 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_features()
432 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_features()
435 features = ops->get_device_features(vdpa); in vhost_vdpa_get_features()
438 return -EFAULT; in vhost_vdpa_get_features()
445 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_backend_features()
446 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_backend_features()
448 if (!ops->get_backend_features) in vhost_vdpa_get_backend_features()
451 return ops->get_backend_features(vdpa); in vhost_vdpa_get_backend_features()
456 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_has_persistent_map()
457 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_has_persistent_map()
459 return (!ops->set_map && !ops->dma_map) || ops->reset_map || in vhost_vdpa_has_persistent_map()
465 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_features()
466 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_set_features()
467 struct vhost_dev *d = &v->vdev; in vhost_vdpa_set_features()
476 if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK) in vhost_vdpa_set_features()
477 return -EBUSY; in vhost_vdpa_set_features()
480 return -EFAULT; in vhost_vdpa_set_features()
483 return -EINVAL; in vhost_vdpa_set_features()
486 actual_features = ops->get_driver_features(vdpa); in vhost_vdpa_set_features()
487 for (i = 0; i < d->nvqs; ++i) { in vhost_vdpa_set_features()
488 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_vdpa_set_features()
490 mutex_lock(&vq->mutex); in vhost_vdpa_set_features()
491 vq->acked_features = actual_features; in vhost_vdpa_set_features()
492 mutex_unlock(&vq->mutex); in vhost_vdpa_set_features()
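vhost_vdpa_set_features() refuses to renegotiate once VIRTIO_CONFIG_S_FEATURES_OK is set, then reads back what the device actually accepted via ops->get_driver_features() and copies that value into every virtqueue's acked_features under the per-queue mutex. A user-space analogue of the fan-out step, using pthreads and illustrative names:

#include <pthread.h>
#include <stdint.h>

struct virtqueue {
        pthread_mutex_t mutex;           /* protects acked_features */
        uint64_t acked_features;
};

/* Propagate the negotiated feature bits to every queue, taking each
 * queue's lock so concurrent workers observe a consistent value. */
static void set_acked_features(struct virtqueue *vqs, int nvqs,
                               uint64_t actual_features)
{
        for (int i = 0; i < nvqs; i++) {
                pthread_mutex_lock(&vqs[i].mutex);
                vqs[i].acked_features = actual_features;
                pthread_mutex_unlock(&vqs[i].mutex);
        }
}

int main(void)
{
        struct virtqueue vqs[2] = {
                { PTHREAD_MUTEX_INITIALIZER, 0 },
                { PTHREAD_MUTEX_INITIALIZER, 0 },
        };

        set_acked_features(vqs, 2, 1ULL << 32);  /* bit 32: VIRTIO_F_VERSION_1 */
        return 0;
}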
500 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_vring_num()
501 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_vring_num()
504 num = ops->get_vq_num_max(vdpa); in vhost_vdpa_get_vring_num()
507 return -EFAULT; in vhost_vdpa_get_vring_num()
514 if (v->config_ctx) { in vhost_vdpa_config_put()
515 eventfd_ctx_put(v->config_ctx); in vhost_vdpa_config_put()
516 v->config_ctx = NULL; in vhost_vdpa_config_put()
529 return -EFAULT; in vhost_vdpa_set_config_call()
532 swap(ctx, v->config_ctx); in vhost_vdpa_set_config_call()
537 if (IS_ERR(v->config_ctx)) { in vhost_vdpa_set_config_call()
538 long ret = PTR_ERR(v->config_ctx); in vhost_vdpa_set_config_call()
540 v->config_ctx = NULL; in vhost_vdpa_set_config_call()
544 v->vdpa->config->set_config_cb(v->vdpa, &cb); in vhost_vdpa_set_config_call()
552 .first = v->range.first, in vhost_vdpa_get_iova_range()
553 .last = v->range.last, in vhost_vdpa_get_iova_range()
557 return -EFAULT; in vhost_vdpa_get_iova_range()
563 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_config_size()
564 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_config_size()
567 size = ops->get_config_size(vdpa); in vhost_vdpa_get_config_size()
570 return -EFAULT; in vhost_vdpa_get_config_size()
577 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_vqs_count()
579 if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs))) in vhost_vdpa_get_vqs_count()
580 return -EFAULT; in vhost_vdpa_get_vqs_count()
592 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_suspend()
593 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_suspend()
596 if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK)) in vhost_vdpa_suspend()
599 if (!ops->suspend) in vhost_vdpa_suspend()
600 return -EOPNOTSUPP; in vhost_vdpa_suspend()
602 ret = ops->suspend(vdpa); in vhost_vdpa_suspend()
604 v->suspended = true; in vhost_vdpa_suspend()
615 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_resume()
616 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_resume()
619 if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK)) in vhost_vdpa_resume()
622 if (!ops->resume) in vhost_vdpa_resume()
623 return -EOPNOTSUPP; in vhost_vdpa_resume()
625 ret = ops->resume(vdpa); in vhost_vdpa_resume()
627 v->suspended = false; in vhost_vdpa_resume()
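Both vhost_vdpa_suspend() and vhost_vdpa_resume() above gate on VIRTIO_CONFIG_S_DRIVER_OK and on the backend actually implementing the callback, and record the outcome in v->suspended so later vring ioctls know the rings are quiesced. A compact model of the suspend side; struct and field names are illustrative, DRIVER_OK is 0x4 per the virtio spec, and the early return 0 when the device is not running is this sketch's assumption about a line the search did not match:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DRIVER_OK 0x4                    /* VIRTIO_CONFIG_S_DRIVER_OK */

struct dev {
        uint8_t status;
        bool suspended;
        int (*suspend)(struct dev *d);   /* NULL: backend can't suspend */
};

static int dev_suspend(struct dev *d)
{
        int ret;

        if (!(d->status & DRIVER_OK))
                return 0;                /* assumed: not running, nothing to do */
        if (!d->suspend)
                return -EOPNOTSUPP;
        ret = d->suspend(d);
        if (!ret)
                d->suspended = true;     /* checked by vring ioctls later */
        return ret;
}

int main(void)
{
        struct dev d = { .status = DRIVER_OK, .suspended = false, .suspend = NULL };

        printf("%d\n", dev_suspend(&d)); /* -EOPNOTSUPP: no callback set */
        return 0;
}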
635 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_vring_ioctl()
636 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_vring_ioctl()
648 if (idx >= v->nvqs) in vhost_vdpa_vring_ioctl()
649 return -ENOBUFS; in vhost_vdpa_vring_ioctl()
651 idx = array_index_nospec(idx, v->nvqs); in vhost_vdpa_vring_ioctl()
652 vq = &v->vqs[idx]; in vhost_vdpa_vring_ioctl()
657 return -EFAULT; in vhost_vdpa_vring_ioctl()
658 ops->set_vq_ready(vdpa, idx, s.num); in vhost_vdpa_vring_ioctl()
661 if (!ops->get_vq_group) in vhost_vdpa_vring_ioctl()
662 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
664 s.num = ops->get_vq_group(vdpa, idx); in vhost_vdpa_vring_ioctl()
665 if (s.num >= vdpa->ngroups) in vhost_vdpa_vring_ioctl()
666 return -EIO; in vhost_vdpa_vring_ioctl()
668 return -EFAULT; in vhost_vdpa_vring_ioctl()
672 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
674 s.num = ops->get_vq_desc_group(vdpa, idx); in vhost_vdpa_vring_ioctl()
675 if (s.num >= vdpa->ngroups) in vhost_vdpa_vring_ioctl()
676 return -EIO; in vhost_vdpa_vring_ioctl()
678 return -EFAULT; in vhost_vdpa_vring_ioctl()
682 return -EFAULT; in vhost_vdpa_vring_ioctl()
683 if (s.num >= vdpa->nas) in vhost_vdpa_vring_ioctl()
684 return -EINVAL; in vhost_vdpa_vring_ioctl()
685 if (!ops->set_group_asid) in vhost_vdpa_vring_ioctl()
686 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
687 return ops->set_group_asid(vdpa, idx, s.num); in vhost_vdpa_vring_ioctl()
689 if (!ops->get_vq_size) in vhost_vdpa_vring_ioctl()
690 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
692 s.num = ops->get_vq_size(vdpa, idx); in vhost_vdpa_vring_ioctl()
694 return -EFAULT; in vhost_vdpa_vring_ioctl()
697 r = ops->get_vq_state(v->vdpa, idx, &vq_state); in vhost_vdpa_vring_ioctl()
702 vq->last_avail_idx = vq_state.packed.last_avail_idx | in vhost_vdpa_vring_ioctl()
704 vq->last_used_idx = vq_state.packed.last_used_idx | in vhost_vdpa_vring_ioctl()
707 vq->last_avail_idx = vq_state.split.avail_index; in vhost_vdpa_vring_ioctl()
711 if (vq->call_ctx.ctx) { in vhost_vdpa_vring_ioctl()
712 if (ops->get_status(vdpa) & in vhost_vdpa_vring_ioctl()
715 vq->call_ctx.producer.token = NULL; in vhost_vdpa_vring_ioctl()
720 r = vhost_vring_ioctl(&v->vdev, cmd, argp); in vhost_vdpa_vring_ioctl()
726 if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended) in vhost_vdpa_vring_ioctl()
727 return -EINVAL; in vhost_vdpa_vring_ioctl()
729 if (ops->set_vq_address(vdpa, idx, in vhost_vdpa_vring_ioctl()
730 (u64)(uintptr_t)vq->desc, in vhost_vdpa_vring_ioctl()
731 (u64)(uintptr_t)vq->avail, in vhost_vdpa_vring_ioctl()
732 (u64)(uintptr_t)vq->used)) in vhost_vdpa_vring_ioctl()
733 r = -EINVAL; in vhost_vdpa_vring_ioctl()
737 if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended) in vhost_vdpa_vring_ioctl()
738 return -EINVAL; in vhost_vdpa_vring_ioctl()
741 vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff; in vhost_vdpa_vring_ioctl()
742 vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000); in vhost_vdpa_vring_ioctl()
743 vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff; in vhost_vdpa_vring_ioctl()
744 vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000); in vhost_vdpa_vring_ioctl()
746 vq_state.split.avail_index = vq->last_avail_idx; in vhost_vdpa_vring_ioctl()
748 r = ops->set_vq_state(vdpa, idx, &vq_state); in vhost_vdpa_vring_ioctl()
752 if (vq->call_ctx.ctx) { in vhost_vdpa_vring_ioctl()
755 cb.trigger = vq->call_ctx.ctx; in vhost_vdpa_vring_ioctl()
756 vq->call_ctx.producer.token = vq->call_ctx.ctx; in vhost_vdpa_vring_ioctl()
757 if (ops->get_status(vdpa) & in vhost_vdpa_vring_ioctl()
765 ops->set_vq_cb(vdpa, idx, &cb); in vhost_vdpa_vring_ioctl()
769 ops->set_vq_num(vdpa, idx, vq->num); in vhost_vdpa_vring_ioctl()
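For packed virtqueues the ioctl handler above folds the 1-bit wrap counter into bit 15 of the 16-bit ring index: masks 0x7fff and 0x8000 split last_avail_idx/last_used_idx back into the vdpa_vq_state_packed fields on SET_VRING_BASE, and the GET path reassembles them with an OR. A small sketch of that encoding (helper names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Pack a 15-bit ring index plus a wrap counter into one u16, the
 * way vhost-vdpa stores packed-ring state in last_avail_idx. */
static uint16_t pack_state(uint16_t idx, int wrap)
{
        return (idx & 0x7fff) | ((uint16_t)(!!wrap) << 15);
}

static void unpack_state(uint16_t v, uint16_t *idx, int *wrap)
{
        *idx = v & 0x7fff;
        *wrap = !!(v & 0x8000);
}

int main(void)
{
        uint16_t idx;
        int wrap;

        unpack_state(pack_state(0x1234, 1), &idx, &wrap);
        assert(idx == 0x1234 && wrap == 1);
        return 0;
}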
779 struct vhost_vdpa *v = filep->private_data; in vhost_vdpa_unlocked_ioctl()
780 struct vhost_dev *d = &v->vdev; in vhost_vdpa_unlocked_ioctl()
788 return -EFAULT; in vhost_vdpa_unlocked_ioctl()
795 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
798 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
801 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
804 return -EINVAL; in vhost_vdpa_unlocked_ioctl()
807 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
810 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
811 vhost_set_backend_features(&v->vdev, features); in vhost_vdpa_unlocked_ioctl()
815 mutex_lock(&d->mutex); in vhost_vdpa_unlocked_ioctl()
843 if (copy_to_user(argp, &v->vdpa->ngroups, in vhost_vdpa_unlocked_ioctl()
844 sizeof(v->vdpa->ngroups))) in vhost_vdpa_unlocked_ioctl()
845 r = -EFAULT; in vhost_vdpa_unlocked_ioctl()
848 if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas))) in vhost_vdpa_unlocked_ioctl()
849 r = -EFAULT; in vhost_vdpa_unlocked_ioctl()
853 r = -ENOIOCTLCMD; in vhost_vdpa_unlocked_ioctl()
870 r = -EFAULT; in vhost_vdpa_unlocked_ioctl()
888 r = vhost_dev_ioctl(&v->vdev, cmd, argp); in vhost_vdpa_unlocked_ioctl()
889 if (r == -ENOIOCTLCMD) in vhost_vdpa_unlocked_ioctl()
905 mutex_unlock(&d->mutex); in vhost_vdpa_unlocked_ioctl()
911 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_general_unmap()
912 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_general_unmap()
913 if (ops->dma_map) { in vhost_vdpa_general_unmap()
914 ops->dma_unmap(vdpa, asid, map->start, map->size); in vhost_vdpa_general_unmap()
915 } else if (ops->set_map == NULL) { in vhost_vdpa_general_unmap()
916 iommu_unmap(v->domain, map->start, map->size); in vhost_vdpa_general_unmap()
923 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_pa_unmap()
929 pinned = PFN_DOWN(map->size); in vhost_vdpa_pa_unmap()
930 for (pfn = PFN_DOWN(map->addr); in vhost_vdpa_pa_unmap()
931 pinned > 0; pfn++, pinned--) { in vhost_vdpa_pa_unmap()
933 if (map->perm & VHOST_ACCESS_WO) in vhost_vdpa_pa_unmap()
937 atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm); in vhost_vdpa_pa_unmap()
950 map_file = (struct vdpa_map_file *)map->opaque; in vhost_vdpa_va_unmap()
951 fput(map_file->file); in vhost_vdpa_va_unmap()
962 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_iotlb_unmap()
964 if (vdpa->use_va) in vhost_vdpa_iotlb_unmap()
995 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_map()
996 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_map()
997 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_map()
1001 r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1, in vhost_vdpa_map()
1006 if (ops->dma_map) { in vhost_vdpa_map()
1007 r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque); in vhost_vdpa_map()
1008 } else if (ops->set_map) { in vhost_vdpa_map()
1009 if (!v->in_batch) in vhost_vdpa_map()
1010 r = ops->set_map(vdpa, asid, iotlb); in vhost_vdpa_map()
1012 r = iommu_map(v->domain, iova, pa, size, in vhost_vdpa_map()
1017 vhost_iotlb_del_range(iotlb, iova, iova + size - 1); in vhost_vdpa_map()
1021 if (!vdpa->use_va) in vhost_vdpa_map()
1022 atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm); in vhost_vdpa_map()
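The vhost_vdpa_map() matches show a consistent ordering: the range is recorded in the software IOTLB first (vhost_iotlb_add_range_ctx()), then the backend is programmed through whichever path the device offers (ops->dma_map, batched ops->set_map, or the platform IOMMU via iommu_map()), and the IOTLB entry is deleted again if programming fails. A condensed user-space sketch of that insert-then-rollback flow; every helper below is an illustrative stub, not a kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for vhost_iotlb_add_range_ctx(),
 * vhost_iotlb_del_range() and ops->dma_map()/set_map()/iommu_map(). */
static int  iotlb_add(uint64_t iova, uint64_t size) { (void)iova; (void)size; return 0; }
static void iotlb_del(uint64_t iova, uint64_t size) { (void)iova; (void)size; }
static int  backend_map(uint64_t iova, uint64_t size) { (void)iova; (void)size; return 0; }

/* Bookkeeping first, hardware second, rollback on failure. Batched
 * backends (ops->set_map) are deferred while a VHOST_IOTLB_BATCH is
 * open, which is what the in_batch flag models here. */
static int map_range(uint64_t iova, uint64_t size, bool in_batch)
{
        int r = iotlb_add(iova, size);

        if (r)
                return r;
        r = in_batch ? 0 : backend_map(iova, size);
        if (r)
                iotlb_del(iova, size);
        return r;
}

int main(void)
{
        printf("map_range: %d\n", map_range(0x1000, 0x1000, false));
        return 0;
}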
1031 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_unmap()
1032 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_unmap()
1035 vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid); in vhost_vdpa_unmap()
1037 if (ops->set_map) { in vhost_vdpa_unmap()
1038 if (!v->in_batch) in vhost_vdpa_unmap()
1039 ops->set_map(vdpa, asid, iotlb); in vhost_vdpa_unmap()
1048 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_va_map()
1054 mmap_read_lock(dev->mm); in vhost_vdpa_va_map()
1057 vma = find_vma(dev->mm, uaddr); in vhost_vdpa_va_map()
1059 ret = -EINVAL; in vhost_vdpa_va_map()
1062 map_size = min(size, vma->vm_end - uaddr); in vhost_vdpa_va_map()
1063 if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) && in vhost_vdpa_va_map()
1064 !(vma->vm_flags & (VM_IO | VM_PFNMAP)))) in vhost_vdpa_va_map()
1069 ret = -ENOMEM; in vhost_vdpa_va_map()
1072 offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start; in vhost_vdpa_va_map()
1073 map_file->offset = offset; in vhost_vdpa_va_map()
1074 map_file->file = get_file(vma->vm_file); in vhost_vdpa_va_map()
1078 fput(map_file->file); in vhost_vdpa_va_map()
1083 size -= map_size; in vhost_vdpa_va_map()
1088 vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova); in vhost_vdpa_va_map()
1090 mmap_read_unlock(dev->mm); in vhost_vdpa_va_map()
1099 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_pa_map()
1112 return -ENOMEM; in vhost_vdpa_pa_map()
1119 ret = -EINVAL; in vhost_vdpa_pa_map()
1123 mmap_read_lock(dev->mm); in vhost_vdpa_pa_map()
1126 if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) { in vhost_vdpa_pa_map()
1127 ret = -ENOMEM; in vhost_vdpa_pa_map()
1144 ret = -ENOMEM; in vhost_vdpa_pa_map()
1159 csize = PFN_PHYS(last_pfn - map_pfn + 1); in vhost_vdpa_pa_map()
1173 pinned - i); in vhost_vdpa_pa_map()
1186 npages -= pinned; in vhost_vdpa_pa_map()
1190 ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1), in vhost_vdpa_pa_map()
1213 mmap_read_unlock(dev->mm); in vhost_vdpa_pa_map()
1224 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_process_iotlb_update()
1226 if (msg->iova < v->range.first || !msg->size || in vhost_vdpa_process_iotlb_update()
1227 msg->iova > U64_MAX - msg->size + 1 || in vhost_vdpa_process_iotlb_update()
1228 msg->iova + msg->size - 1 > v->range.last) in vhost_vdpa_process_iotlb_update()
1229 return -EINVAL; in vhost_vdpa_process_iotlb_update()
1231 if (vhost_iotlb_itree_first(iotlb, msg->iova, in vhost_vdpa_process_iotlb_update()
1232 msg->iova + msg->size - 1)) in vhost_vdpa_process_iotlb_update()
1233 return -EEXIST; in vhost_vdpa_process_iotlb_update()
1235 if (vdpa->use_va) in vhost_vdpa_process_iotlb_update()
1236 return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size, in vhost_vdpa_process_iotlb_update()
1237 msg->uaddr, msg->perm); in vhost_vdpa_process_iotlb_update()
1239 return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr, in vhost_vdpa_process_iotlb_update()
1240 msg->perm); in vhost_vdpa_process_iotlb_update()
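vhost_vdpa_process_iotlb_update() validates the requested [iova, iova + size - 1] range without ever forming iova + size first: msg->iova > U64_MAX - msg->size + 1 catches 64-bit wrap-around, after which iova + size - 1 is safe to compute and compare against v->range.last. The same check in isolation (range_first/range_last stand in for v->range):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int check_iova(uint64_t iova, uint64_t size,
                      uint64_t range_first, uint64_t range_last)
{
        if (iova < range_first || size == 0 ||
            iova > UINT64_MAX - size + 1 ||      /* iova + size would wrap */
            iova + size - 1 > range_last)        /* safe after wrap check  */
                return -EINVAL;
        return 0;
}

int main(void)
{
        /* size so large that iova + size wraps past 2^64: rejected. */
        printf("%d\n", check_iova(0x1000, UINT64_MAX, 0, UINT64_MAX));
        return 0;
}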
1247 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_process_iotlb_msg()
1248 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_process_iotlb_msg()
1253 mutex_lock(&dev->mutex); in vhost_vdpa_process_iotlb_msg()
1259 if (msg->type == VHOST_IOTLB_UPDATE || in vhost_vdpa_process_iotlb_msg()
1260 msg->type == VHOST_IOTLB_BATCH_BEGIN) { in vhost_vdpa_process_iotlb_msg()
1263 dev_err(&v->dev, "can't find and alloc asid %d\n", in vhost_vdpa_process_iotlb_msg()
1265 r = -EINVAL; in vhost_vdpa_process_iotlb_msg()
1268 iotlb = &as->iotlb; in vhost_vdpa_process_iotlb_msg()
1272 if ((v->in_batch && v->batch_asid != asid) || !iotlb) { in vhost_vdpa_process_iotlb_msg()
1273 if (v->in_batch && v->batch_asid != asid) { in vhost_vdpa_process_iotlb_msg()
1274 dev_info(&v->dev, "batch id %d asid %d\n", in vhost_vdpa_process_iotlb_msg()
1275 v->batch_asid, asid); in vhost_vdpa_process_iotlb_msg()
1278 dev_err(&v->dev, "no iotlb for asid %d\n", asid); in vhost_vdpa_process_iotlb_msg()
1279 r = -EINVAL; in vhost_vdpa_process_iotlb_msg()
1283 switch (msg->type) { in vhost_vdpa_process_iotlb_msg()
1288 vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size); in vhost_vdpa_process_iotlb_msg()
1291 v->batch_asid = asid; in vhost_vdpa_process_iotlb_msg()
1292 v->in_batch = true; in vhost_vdpa_process_iotlb_msg()
1295 if (v->in_batch && ops->set_map) in vhost_vdpa_process_iotlb_msg()
1296 ops->set_map(vdpa, asid, iotlb); in vhost_vdpa_process_iotlb_msg()
1297 v->in_batch = false; in vhost_vdpa_process_iotlb_msg()
1300 r = -EINVAL; in vhost_vdpa_process_iotlb_msg()
1304 mutex_unlock(&dev->mutex); in vhost_vdpa_process_iotlb_msg()
1312 struct file *file = iocb->ki_filp; in vhost_vdpa_chr_write_iter()
1313 struct vhost_vdpa *v = file->private_data; in vhost_vdpa_chr_write_iter()
1314 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_chr_write_iter()
1321 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_alloc_domain()
1322 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_alloc_domain()
1327 if (ops->set_map || ops->dma_map) in vhost_vdpa_alloc_domain()
1331 dev_warn_once(&v->dev, in vhost_vdpa_alloc_domain()
1332 "Failed to allocate domain, device is not IOMMU cache coherent capable\n"); in vhost_vdpa_alloc_domain()
1333 return -ENOTSUPP; in vhost_vdpa_alloc_domain()
1336 v->domain = iommu_paging_domain_alloc(dma_dev); in vhost_vdpa_alloc_domain()
1337 if (IS_ERR(v->domain)) { in vhost_vdpa_alloc_domain()
1338 ret = PTR_ERR(v->domain); in vhost_vdpa_alloc_domain()
1339 v->domain = NULL; in vhost_vdpa_alloc_domain()
1343 ret = iommu_attach_device(v->domain, dma_dev); in vhost_vdpa_alloc_domain()
1350 iommu_domain_free(v->domain); in vhost_vdpa_alloc_domain()
1351 v->domain = NULL; in vhost_vdpa_alloc_domain()
1357 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_free_domain()
1360 if (v->domain) { in vhost_vdpa_free_domain()
1361 iommu_detach_device(v->domain, dma_dev); in vhost_vdpa_free_domain()
1362 iommu_domain_free(v->domain); in vhost_vdpa_free_domain()
1365 v->domain = NULL; in vhost_vdpa_free_domain()
1370 struct vdpa_iova_range *range = &v->range; in vhost_vdpa_set_iova_range()
1371 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_iova_range()
1372 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_set_iova_range()
1374 if (ops->get_iova_range) { in vhost_vdpa_set_iova_range()
1375 *range = ops->get_iova_range(vdpa); in vhost_vdpa_set_iova_range()
1376 } else if (v->domain && v->domain->geometry.force_aperture) { in vhost_vdpa_set_iova_range()
1377 range->first = v->domain->geometry.aperture_start; in vhost_vdpa_set_iova_range()
1378 range->last = v->domain->geometry.aperture_end; in vhost_vdpa_set_iova_range()
1380 range->first = 0; in vhost_vdpa_set_iova_range()
1381 range->last = ULLONG_MAX; in vhost_vdpa_set_iova_range()
1390 for (asid = 0; asid < v->vdpa->nas; asid++) { in vhost_vdpa_cleanup()
1397 vhost_dev_cleanup(&v->vdev); in vhost_vdpa_cleanup()
1398 kfree(v->vdev.vqs); in vhost_vdpa_cleanup()
1399 v->vdev.vqs = NULL; in vhost_vdpa_cleanup()
1410 v = container_of(inode->i_cdev, struct vhost_vdpa, cdev); in vhost_vdpa_open()
1412 opened = atomic_cmpxchg(&v->opened, 0, 1); in vhost_vdpa_open()
1414 return -EBUSY; in vhost_vdpa_open()
1416 nvqs = v->nvqs; in vhost_vdpa_open()
1423 r = -ENOMEM; in vhost_vdpa_open()
1427 dev = &v->vdev; in vhost_vdpa_open()
1429 vqs[i] = &v->vqs[i]; in vhost_vdpa_open()
1430 vqs[i]->handle_kick = handle_vq_kick; in vhost_vdpa_open()
1431 vqs[i]->call_ctx.ctx = NULL; in vhost_vdpa_open()
1442 filep->private_data = v; in vhost_vdpa_open()
1449 atomic_dec(&v->opened); in vhost_vdpa_open()
1457 for (i = 0; i < v->nvqs; i++) in vhost_vdpa_clean_irq()
1463 struct vhost_vdpa *v = filep->private_data; in vhost_vdpa_release()
1464 struct vhost_dev *d = &v->vdev; in vhost_vdpa_release()
1466 mutex_lock(&d->mutex); in vhost_vdpa_release()
1467 filep->private_data = NULL; in vhost_vdpa_release()
1470 vhost_dev_stop(&v->vdev); in vhost_vdpa_release()
1474 mutex_unlock(&d->mutex); in vhost_vdpa_release()
1476 atomic_dec(&v->opened); in vhost_vdpa_release()
1477 complete(&v->completion); in vhost_vdpa_release()
1485 struct vhost_vdpa *v = vmf->vma->vm_file->private_data; in vhost_vdpa_fault()
1486 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_fault()
1487 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_fault()
1489 struct vm_area_struct *vma = vmf->vma; in vhost_vdpa_fault()
1490 u16 index = vma->vm_pgoff; in vhost_vdpa_fault()
1492 notify = ops->get_vq_notification(vdpa, index); in vhost_vdpa_fault()
1494 return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr)); in vhost_vdpa_fault()
1503 struct vhost_vdpa *v = vma->vm_file->private_data; in vhost_vdpa_mmap()
1504 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_mmap()
1505 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_mmap()
1507 unsigned long index = vma->vm_pgoff; in vhost_vdpa_mmap()
1509 if (vma->vm_end - vma->vm_start != PAGE_SIZE) in vhost_vdpa_mmap()
1510 return -EINVAL; in vhost_vdpa_mmap()
1511 if ((vma->vm_flags & VM_SHARED) == 0) in vhost_vdpa_mmap()
1512 return -EINVAL; in vhost_vdpa_mmap()
1513 if (vma->vm_flags & VM_READ) in vhost_vdpa_mmap()
1514 return -EINVAL; in vhost_vdpa_mmap()
1516 return -EINVAL; in vhost_vdpa_mmap()
1517 if (!ops->get_vq_notification) in vhost_vdpa_mmap()
1518 return -ENOTSUPP; in vhost_vdpa_mmap()
1524 notify = ops->get_vq_notification(vdpa, index); in vhost_vdpa_mmap()
1525 if (notify.addr & (PAGE_SIZE - 1)) in vhost_vdpa_mmap()
1526 return -EINVAL; in vhost_vdpa_mmap()
1527 if (vma->vm_end - vma->vm_start != notify.size) in vhost_vdpa_mmap()
1528 return -ENOTSUPP; in vhost_vdpa_mmap()
1531 vma->vm_ops = &vhost_vdpa_vm_ops; in vhost_vdpa_mmap()
1553 ida_free(&vhost_vdpa_ida, v->minor); in vhost_vdpa_release_dev()
1554 kfree(v->vqs); in vhost_vdpa_release_dev()
1560 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_probe()
1565 /* We can't support platform IOMMU device with more than 1 in vhost_vdpa_probe()
1568 if (!ops->set_map && !ops->dma_map && in vhost_vdpa_probe()
1569 (vdpa->ngroups > 1 || vdpa->nas > 1)) in vhost_vdpa_probe()
1570 return -EOPNOTSUPP; in vhost_vdpa_probe()
1574 return -ENOMEM; in vhost_vdpa_probe()
1576 minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1, in vhost_vdpa_probe()
1583 atomic_set(&v->opened, 0); in vhost_vdpa_probe()
1584 v->minor = minor; in vhost_vdpa_probe()
1585 v->vdpa = vdpa; in vhost_vdpa_probe()
1586 v->nvqs = vdpa->nvqs; in vhost_vdpa_probe()
1587 v->virtio_id = ops->get_device_id(vdpa); in vhost_vdpa_probe()
1589 device_initialize(&v->dev); in vhost_vdpa_probe()
1590 v->dev.release = vhost_vdpa_release_dev; in vhost_vdpa_probe()
1591 v->dev.parent = &vdpa->dev; in vhost_vdpa_probe()
1592 v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor); in vhost_vdpa_probe()
1593 v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue), in vhost_vdpa_probe()
1595 if (!v->vqs) { in vhost_vdpa_probe()
1596 r = -ENOMEM; in vhost_vdpa_probe()
1600 r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor); in vhost_vdpa_probe()
1604 cdev_init(&v->cdev, &vhost_vdpa_fops); in vhost_vdpa_probe()
1605 v->cdev.owner = THIS_MODULE; in vhost_vdpa_probe()
1607 r = cdev_device_add(&v->cdev, &v->dev); in vhost_vdpa_probe()
1611 init_completion(&v->completion); in vhost_vdpa_probe()
1615 INIT_HLIST_HEAD(&v->as[i]); in vhost_vdpa_probe()
1620 put_device(&v->dev); in vhost_vdpa_probe()
1629 cdev_device_del(&v->cdev, &v->dev); in vhost_vdpa_remove()
1632 opened = atomic_cmpxchg(&v->opened, 0, 1); in vhost_vdpa_remove()
1635 wait_for_completion(&v->completion); in vhost_vdpa_remove()
1638 put_device(&v->dev); in vhost_vdpa_remove()
1654 "vhost-vdpa"); in vhost_vdpa_init()
1681 MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");