Lines Matching +full:virtio +full:- +full:iommu
1 // SPDX-License-Identifier: GPL-2.0
3 * Virtio driver for the paravirtualized IOMMU
11 #include <linux/dma-map-ops.h>
14 #include <linux/iommu.h>
18 #include <linux/virtio.h>
25 #include "dma-iommu.h"
35 struct iommu_device iommu; member
105 struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); in viommu_get_req_errno()
107 switch (tail->status) { in viommu_get_req_errno()
111 return -ENOSYS; in viommu_get_req_errno()
113 return -EINVAL; in viommu_get_req_errno()
115 return -ERANGE; in viommu_get_req_errno()
117 return -ENOENT; in viommu_get_req_errno()
119 return -EFAULT; in viommu_get_req_errno()
121 return -ENOMEM; in viommu_get_req_errno()
125 return -EIO; in viommu_get_req_errno()
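Every request placed on the request virtqueue ends in a small tail that the device fills with a status byte; viommu_get_req_errno() above converts that status into a negative errno. A minimal user-space sketch of the same pattern follows, with demo_* names and status values that are only meant to mirror the VIRTIO_IOMMU_S_* constants, not to quote them:

/* Hedged sketch, not driver code: map a status byte at the end of a
 * request buffer to a negative errno. Names and values are illustrative. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_req_tail {
        uint8_t status;
        uint8_t reserved[3];
};

enum {
        DEMO_S_OK = 0, DEMO_S_IOERR, DEMO_S_UNSUPP, DEMO_S_DEVERR,
        DEMO_S_INVAL, DEMO_S_RANGE, DEMO_S_NOENT, DEMO_S_FAULT, DEMO_S_NOMEM,
};

static int demo_get_req_errno(const void *buf, size_t len)
{
        const struct demo_req_tail *tail =
                (const void *)((const char *)buf + len - sizeof(*tail));

        switch (tail->status) {
        case DEMO_S_OK:     return 0;
        case DEMO_S_UNSUPP: return -ENOSYS;
        case DEMO_S_INVAL:  return -EINVAL;
        case DEMO_S_RANGE:  return -ERANGE;
        case DEMO_S_NOENT:  return -ENOENT;
        case DEMO_S_FAULT:  return -EFAULT;
        case DEMO_S_NOMEM:  return -ENOMEM;
        default:            return -EIO;   /* IOERR, DEVERR, anything unknown */
        }
}

int main(void)
{
        uint8_t buf[32];

        memset(buf, 0, sizeof(buf));
        buf[sizeof(buf) - sizeof(struct demo_req_tail)] = DEMO_S_NOENT;
        printf("status -> errno: %d\n", demo_get_req_errno(buf, sizeof(buf)));
        return 0;
}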
131 struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); in viommu_set_req_status()
133 tail->status = status; in viommu_set_req_status()
142 if (req->type == VIRTIO_IOMMU_T_PROBE) in viommu_get_write_desc_offset()
143 return len - viommu->probe_size - tail_size; in viommu_get_write_desc_offset()
145 return len - tail_size; in viommu_get_write_desc_offset()
149 * __viommu_sync_req - Complete all in-flight requests
152 * requests that were in-flight at the time of the call have completed.
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_sync_req()
161 assert_spin_locked(&viommu->request_lock); in __viommu_sync_req()
165 while (!list_empty(&viommu->requests)) { in __viommu_sync_req()
172 viommu_set_req_status(req->buf, req->len, in __viommu_sync_req()
175 write_len = req->len - req->write_offset; in __viommu_sync_req()
176 if (req->writeback && len == write_len) in __viommu_sync_req()
177 memcpy(req->writeback, req->buf + req->write_offset, in __viommu_sync_req()
180 list_del(&req->list); in __viommu_sync_req()
192 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_sync_req()
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_sync_req()
196 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_sync_req()
202 * __viommu_add_request - Add one request to the queue
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_add_req()
227 assert_spin_locked(&viommu->request_lock); in __viommu_add_req()
231 return -EINVAL; in __viommu_add_req()
235 return -ENOMEM; in __viommu_add_req()
237 req->len = len; in __viommu_add_req()
239 req->writeback = buf + write_offset; in __viommu_add_req()
240 req->write_offset = write_offset; in __viommu_add_req()
242 memcpy(&req->buf, buf, write_offset); in __viommu_add_req()
244 sg_init_one(&top_sg, req->buf, write_offset); in __viommu_add_req()
245 sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset); in __viommu_add_req()
248 if (ret == -ENOSPC) { in __viommu_add_req()
256 list_add_tail(&req->list, &viommu->requests); in __viommu_add_req()
269 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_add_req()
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret); in viommu_add_req()
273 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_add_req()
288 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_send_req_sync()
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret); in viommu_send_req_sync()
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_send_req_sync()
299 /* Fall-through (get the actual request status) */ in viommu_send_req_sync()
304 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_send_req_sync()
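As viommu_get_write_desc_offset() and __viommu_sync_req() above show, each request buffer is split at write_offset into a device-readable head and a device-writable bottom (response fields plus the tail), and the writable part is copied back into the caller's buffer once the device has completed the descriptor. A hedged sketch of that copy-back step, with invented names and a fixed-size staging buffer:

/* Hedged sketch, not driver code: copy the device-written part of a
 * completed request back into the caller's buffer. */
#include <stdio.h>
#include <string.h>

struct demo_req {
        void *writeback;           /* caller buffer + write_offset, or NULL */
        unsigned int write_offset;
        unsigned int len;
        char buf[64];              /* staged copy handed to the device */
};

static void demo_complete_req(struct demo_req *req, unsigned int written)
{
        unsigned int write_len = req->len - req->write_offset;

        /* Only copy back if the device filled the whole writable part. */
        if (req->writeback && written == write_len)
                memcpy(req->writeback, req->buf + req->write_offset, write_len);
}

int main(void)
{
        char caller_buf[16] = { 0 };
        struct demo_req req = {
                .writeback = caller_buf + 12,
                .write_offset = 12,
                .len = 16,
        };

        memcpy(req.buf, caller_buf, req.len);   /* stage the request */
        memcpy(req.buf + 12, "OK!", 4);         /* pretend the device responded */
        demo_complete_req(&req, 4);
        printf("written-back tail: %s\n", caller_buf + 12);
        return 0;
}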
309 * viommu_add_mapping - add a mapping to the internal tree
321 return -ENOMEM; in viommu_add_mapping()
323 mapping->paddr = paddr; in viommu_add_mapping()
324 mapping->iova.start = iova; in viommu_add_mapping()
325 mapping->iova.last = end; in viommu_add_mapping()
326 mapping->flags = flags; in viommu_add_mapping()
328 spin_lock_irqsave(&vdomain->mappings_lock, irqflags); in viommu_add_mapping()
329 interval_tree_insert(&mapping->iova, &vdomain->mappings); in viommu_add_mapping()
330 spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags); in viommu_add_mapping()
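viommu_add_mapping() records each mapping as an inclusive [iova.start, iova.last] interval under mappings_lock, so that viommu_del_mappings(), viommu_replay_mappings() and viommu_iova_to_phys() further down can find it again. The sketch below models the same bookkeeping with a plain linked list instead of the kernel's interval tree; every name in it is illustrative:

/* Hedged sketch, not driver code: track mappings as inclusive [start, last]
 * ranges and translate an IOVA back to a physical address. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_mapping {
        uint64_t start, last;   /* inclusive IOVA range */
        uint64_t paddr;
        uint32_t flags;
        struct demo_mapping *next;
};

static struct demo_mapping *demo_add_mapping(struct demo_mapping *head,
                                             uint64_t iova, uint64_t end,
                                             uint64_t paddr, uint32_t flags)
{
        struct demo_mapping *m = malloc(sizeof(*m));

        if (!m)
                return head;
        m->start = iova;
        m->last = end;
        m->paddr = paddr;
        m->flags = flags;
        m->next = head;
        return m;
}

static uint64_t demo_iova_to_phys(const struct demo_mapping *head, uint64_t iova)
{
        for (; head; head = head->next)
                if (iova >= head->start && iova <= head->last)
                        return head->paddr + (iova - head->start);
        return 0;
}

int main(void)
{
        struct demo_mapping *maps = NULL;

        maps = demo_add_mapping(maps, 0x1000, 0x1fff, 0x80000000, 0x3);
        printf("0x1800 -> %#llx\n",
               (unsigned long long)demo_iova_to_phys(maps, 0x1800));
        return 0;
}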
336 * viommu_del_mappings - remove mappings from the internal tree
352 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_del_mappings()
353 next = interval_tree_iter_first(&vdomain->mappings, iova, end); in viommu_del_mappings()
360 if (mapping->iova.start < iova) in viommu_del_mappings()
364 * Virtio-iommu doesn't allow UNMAP to split a mapping created in viommu_del_mappings()
367 unmapped += mapping->iova.last - mapping->iova.start + 1; in viommu_del_mappings()
369 interval_tree_remove(node, &vdomain->mappings); in viommu_del_mappings()
372 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_del_mappings()
386 u64 iova = vdomain->domain.geometry.aperture_start; in viommu_domain_map_identity()
387 u64 limit = vdomain->domain.geometry.aperture_end; in viommu_domain_map_identity()
389 unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap); in viommu_domain_map_identity()
392 limit = ALIGN_DOWN(limit + 1, granule) - 1; in viommu_domain_map_identity()
394 list_for_each_entry(resv, &vdev->resv_regions, list) { in viommu_domain_map_identity()
395 u64 resv_start = ALIGN_DOWN(resv->start, granule); in viommu_domain_map_identity()
396 u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1; in viommu_domain_map_identity()
403 ret = viommu_add_mapping(vdomain, iova, resv_start - 1, in viommu_domain_map_identity()
427 * viommu_replay_mappings - re-send MAP requests
430 * mappings were deleted from the device. Re-create the mappings available in
441 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_replay_mappings()
442 node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); in viommu_replay_mappings()
447 .domain = cpu_to_le32(vdomain->id), in viommu_replay_mappings()
448 .virt_start = cpu_to_le64(mapping->iova.start), in viommu_replay_mappings()
449 .virt_end = cpu_to_le64(mapping->iova.last), in viommu_replay_mappings()
450 .phys_start = cpu_to_le64(mapping->paddr), in viommu_replay_mappings()
451 .flags = cpu_to_le32(mapping->flags), in viommu_replay_mappings()
454 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_replay_mappings()
458 node = interval_tree_iter_next(node, 0, -1UL); in viommu_replay_mappings()
460 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_replay_mappings()
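viommu_replay_mappings() rebuilds a MAP request from each saved range, converting every multi-byte field to little-endian before sending it. The sketch below shows the same conversion in user space with a demo structure and glibc's htole32()/htole64() in place of the kernel's cpu_to_le helpers; the field layout and the request type value are illustrative, not taken from the spec:

/* Hedged sketch, not driver code: fill a MAP-style request whose fields
 * are little-endian on the wire. Struct layout is illustrative only. */
#include <endian.h>     /* htole32/htole64/le32toh, glibc/musl */
#include <stdint.h>
#include <stdio.h>

enum { DEMO_T_MAP = 3 };        /* illustrative request type value */

struct demo_req_map {
        uint8_t  type;
        uint8_t  reserved[3];
        uint32_t domain;
        uint64_t virt_start;
        uint64_t virt_end;
        uint64_t phys_start;
        uint32_t flags;
} __attribute__((packed));

static void demo_fill_map(struct demo_req_map *map, uint32_t domain,
                          uint64_t iova, uint64_t last, uint64_t paddr,
                          uint32_t flags)
{
        map->type = DEMO_T_MAP;
        map->domain = htole32(domain);
        map->virt_start = htole64(iova);
        map->virt_end = htole64(last);
        map->phys_start = htole64(paddr);
        map->flags = htole32(flags);
}

int main(void)
{
        struct demo_req_map map = { 0 };

        demo_fill_map(&map, 1, 0x1000, 0x1fff, 0x80000000, 0x3);
        printf("domain field round-trips to %u\n", (unsigned)le32toh(map.domain));
        return 0;
}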
475 start = start64 = le64_to_cpu(mem->start); in viommu_add_resv_mem()
476 end = end64 = le64_to_cpu(mem->end); in viommu_add_resv_mem()
477 size = end64 - start64 + 1; in viommu_add_resv_mem()
479 /* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */ in viommu_add_resv_mem()
480 if (start != start64 || end != end64 || size < end64 - start64) in viommu_add_resv_mem()
481 return -EOVERFLOW; in viommu_add_resv_mem()
484 return -EINVAL; in viommu_add_resv_mem()
486 switch (mem->subtype) { in viommu_add_resv_mem()
488 dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", in viommu_add_resv_mem()
489 mem->subtype); in viommu_add_resv_mem()
503 return -ENOMEM; in viommu_add_resv_mem()
506 list_for_each_entry(next, &vdev->resv_regions, list) { in viommu_add_resv_mem()
507 if (next->start > region->start) in viommu_add_resv_mem()
510 list_add_tail(&region->list, &next->list); in viommu_add_resv_mem()
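The checks in viommu_add_resv_mem() guard against two overflows: the device-provided 64-bit start/end may not fit the kernel's phys_addr_t and size_t, and end64 - start64 + 1 can wrap to zero when a region spans the whole address space. A small sketch of the same arithmetic, deliberately using 32-bit stand-ins for the native types so the truncation is visible:

/* Hedged sketch, not driver code: detect truncation and size wrap-around
 * when converting a device-provided 64-bit range to narrower types. */
#include <stdint.h>
#include <stdio.h>

static int demo_check_range(uint64_t start64, uint64_t end64)
{
        uint32_t start = start64;               /* stands in for phys_addr_t */
        uint32_t end = end64;
        uint32_t size = end64 - start64 + 1;    /* stands in for size_t */

        /* Catch truncation and the unlikely end64 - start64 + 1 == 0 case. */
        if (start != start64 || end != end64 || size < end64 - start64)
                return -1;      /* -EOVERFLOW in the driver */
        return 0;
}

int main(void)
{
        printf("small region: %d\n", demo_check_range(0x8000, 0xffff));
        printf("truncated:    %d\n",
               demo_check_range(0x100000000ULL, 0x100000fffULL));
        printf("wrap-around:  %d\n", demo_check_range(0, UINT64_MAX));
        return 0;
}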
525 if (!fwspec->num_ids) in viommu_probe_endpoint()
526 return -EINVAL; in viommu_probe_endpoint()
528 probe_len = sizeof(*probe) + viommu->probe_size + in viommu_probe_endpoint()
532 return -ENOMEM; in viommu_probe_endpoint()
534 probe->head.type = VIRTIO_IOMMU_T_PROBE; in viommu_probe_endpoint()
539 probe->endpoint = cpu_to_le32(fwspec->ids[0]); in viommu_probe_endpoint()
545 prop = (void *)probe->properties; in viommu_probe_endpoint()
546 type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; in viommu_probe_endpoint()
549 cur < viommu->probe_size) { in viommu_probe_endpoint()
550 len = le16_to_cpu(prop->length) + sizeof(*prop); in viommu_probe_endpoint()
564 if (cur >= viommu->probe_size) in viommu_probe_endpoint()
567 prop = (void *)probe->properties + cur; in viommu_probe_endpoint()
568 type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; in viommu_probe_endpoint()
581 u8 reason = fault->reason; in viommu_fault_handler()
582 u32 flags = le32_to_cpu(fault->flags); in viommu_fault_handler()
583 u32 endpoint = le32_to_cpu(fault->endpoint); in viommu_fault_handler()
584 u64 address = le64_to_cpu(fault->address); in viommu_fault_handler()
601 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", in viommu_fault_handler()
607 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", in viommu_fault_handler()
618 struct viommu_dev *viommu = vq->vdev->priv; in viommu_event_handler()
622 dev_err(viommu->dev, in viommu_event_handler()
625 } else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) { in viommu_event_handler()
626 viommu_fault_handler(viommu, &evt->fault); in viommu_event_handler()
632 dev_err(viommu->dev, "could not add event buffer\n"); in viommu_event_handler()
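viommu_fault_handler() decodes the reason, flags, endpoint and address carried in an event-queue fault record and rate-limits the resulting log line. The sketch below illustrates only the flag-decoding pattern; the DEMO_FAULT_F_* bit positions are invented and do not quote the virtio-iommu spec:

/* Hedged sketch, not driver code: decode fault flags into a log line.
 * Bit positions are illustrative, not the spec's values. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FAULT_F_READ       (1u << 0)
#define DEMO_FAULT_F_WRITE      (1u << 1)
#define DEMO_FAULT_F_EXEC       (1u << 2)
#define DEMO_FAULT_F_ADDRESS    (1u << 8)       /* 'address' field is valid */

static void demo_report_fault(uint32_t flags, uint32_t endpoint, uint64_t address)
{
        if (flags & DEMO_FAULT_F_ADDRESS)
                printf("fault from EP %u at %#llx [%s%s%s]\n", endpoint,
                       (unsigned long long)address,
                       flags & DEMO_FAULT_F_READ ? "R" : "",
                       flags & DEMO_FAULT_F_WRITE ? "W" : "",
                       flags & DEMO_FAULT_F_EXEC ? "X" : "");
        else
                printf("fault from EP %u\n", endpoint);
}

int main(void)
{
        demo_report_fault(DEMO_FAULT_F_ADDRESS | DEMO_FAULT_F_WRITE, 3, 0xdeadf000);
        demo_report_fault(0, 7, 0);
        return 0;
}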
638 /* IOMMU API */
653 mutex_init(&vdomain->mutex); in viommu_domain_alloc()
654 spin_lock_init(&vdomain->mappings_lock); in viommu_domain_alloc()
655 vdomain->mappings = RB_ROOT_CACHED; in viommu_domain_alloc()
657 return &vdomain->domain; in viommu_domain_alloc()
665 struct viommu_dev *viommu = vdev->viommu; in viommu_domain_finalise()
668 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); in viommu_domain_finalise()
670 dev_err(vdev->dev, in viommu_domain_finalise()
673 return -ENODEV; in viommu_domain_finalise()
676 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, in viommu_domain_finalise()
677 viommu->last_domain, GFP_KERNEL); in viommu_domain_finalise()
681 vdomain->id = (unsigned int)ret; in viommu_domain_finalise()
683 domain->pgsize_bitmap = viommu->pgsize_bitmap; in viommu_domain_finalise()
684 domain->geometry = viommu->geometry; in viommu_domain_finalise()
686 vdomain->map_flags = viommu->map_flags; in viommu_domain_finalise()
687 vdomain->viommu = viommu; in viommu_domain_finalise()
689 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in viommu_domain_finalise()
690 if (virtio_has_feature(viommu->vdev, in viommu_domain_finalise()
692 vdomain->bypass = true; in viommu_domain_finalise()
698 ida_free(&viommu->domain_ids, vdomain->id); in viommu_domain_finalise()
699 vdomain->viommu = NULL; in viommu_domain_finalise()
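Both viommu_domain_finalise() and viommu_domain_map_identity() derive the mapping granule from the lowest set bit of the page-size bitmap and align addresses to it before issuing MAP requests. A quick user-space sketch of that arithmetic, with invented helper names standing in for __ffs(), ALIGN_DOWN() and ALIGN():

/* Hedged sketch, not driver code: derive a granule from a page-size bitmap
 * and align a reserved range to it. Helper names are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_granule(uint64_t pgsize_bitmap)
{
        /* Lowest set bit == smallest supported page size. */
        return pgsize_bitmap & -pgsize_bitmap;
}

static uint64_t demo_align_down(uint64_t x, uint64_t granule)
{
        return x & ~(granule - 1);
}

static uint64_t demo_align_up(uint64_t x, uint64_t granule)
{
        return (x + granule - 1) & ~(granule - 1);
}

int main(void)
{
        uint64_t bitmap = (1ULL << 12) | (1ULL << 21);  /* 4K and 2M pages */
        uint64_t granule = demo_granule(bitmap);
        uint64_t resv_start = 0x8001234, resv_len = 0x100;

        printf("granule:     %#llx\n", (unsigned long long)granule);
        printf("resv region: [%#llx, %#llx]\n",
               (unsigned long long)demo_align_down(resv_start, granule),
               (unsigned long long)(demo_align_up(resv_start + resv_len, granule) - 1));
        return 0;
}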
714 if (vdomain->viommu) in viommu_domain_free()
715 ida_free(&vdomain->viommu->domain_ids, vdomain->id); in viommu_domain_free()
729 mutex_lock(&vdomain->mutex); in viommu_attach_dev()
730 if (!vdomain->viommu) { in viommu_attach_dev()
736 } else if (vdomain->viommu != vdev->viommu) { in viommu_attach_dev()
737 ret = -EINVAL; in viommu_attach_dev()
739 mutex_unlock(&vdomain->mutex); in viommu_attach_dev()
745 * In the virtio-iommu device, when attaching the endpoint to a new in viommu_attach_dev()
754 * vdev->vdomain is protected by group->mutex in viommu_attach_dev()
756 if (vdev->vdomain) in viommu_attach_dev()
757 vdev->vdomain->nr_endpoints--; in viommu_attach_dev()
761 .domain = cpu_to_le32(vdomain->id), in viommu_attach_dev()
764 if (vdomain->bypass) in viommu_attach_dev()
767 for (i = 0; i < fwspec->num_ids; i++) { in viommu_attach_dev()
768 req.endpoint = cpu_to_le32(fwspec->ids[i]); in viommu_attach_dev()
770 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); in viommu_attach_dev()
775 if (!vdomain->nr_endpoints) { in viommu_attach_dev()
785 vdomain->nr_endpoints++; in viommu_attach_dev()
786 vdev->vdomain = vdomain; in viommu_attach_dev()
795 struct viommu_domain *vdomain = vdev->vdomain; in viommu_detach_dev()
796 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev); in viommu_detach_dev()
803 .domain = cpu_to_le32(vdomain->id), in viommu_detach_dev()
806 for (i = 0; i < fwspec->num_ids; i++) { in viommu_detach_dev()
807 req.endpoint = cpu_to_le32(fwspec->ids[i]); in viommu_detach_dev()
808 WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req))); in viommu_detach_dev()
810 vdomain->nr_endpoints--; in viommu_detach_dev()
811 vdev->vdomain = NULL; in viommu_detach_dev()
821 u64 end = iova + size - 1; in viommu_map_pages()
829 if (flags & ~vdomain->map_flags) in viommu_map_pages()
830 return -EINVAL; in viommu_map_pages()
836 if (vdomain->nr_endpoints) { in viommu_map_pages()
839 .domain = cpu_to_le32(vdomain->id), in viommu_map_pages()
846 ret = viommu_add_req(vdomain->viommu, &map, sizeof(map)); in viommu_map_pages()
868 unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); in viommu_unmap_pages()
873 if (!vdomain->nr_endpoints) in viommu_unmap_pages()
878 .domain = cpu_to_le32(vdomain->id), in viommu_unmap_pages()
880 .virt_end = cpu_to_le64(iova + unmapped - 1), in viommu_unmap_pages()
883 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); in viommu_unmap_pages()
896 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_iova_to_phys()
897 node = interval_tree_iter_first(&vdomain->mappings, iova, iova); in viommu_iova_to_phys()
900 paddr = mapping->paddr + (iova - mapping->iova.start); in viommu_iova_to_phys()
902 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_iova_to_phys()
912 viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync()
924 if (!vdomain->nr_endpoints) in viommu_iotlb_sync_map()
926 return viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync_map()
937 if (!vdomain->nr_endpoints) in viommu_flush_iotlb_all()
939 viommu_sync_req(vdomain->viommu); in viommu_flush_iotlb_all()
948 list_for_each_entry(entry, &vdev->resv_regions, list) { in viommu_get_resv_regions()
949 if (entry->type == IOMMU_RESV_MSI) in viommu_get_resv_regions()
955 list_add_tail(&new_entry->list, head); in viommu_get_resv_regions()
960 * software-mapped region. in viommu_get_resv_regions()
969 list_add_tail(&msi->list, head); in viommu_get_resv_regions()
980 return device_match_fwnode(dev->parent, data); in viommu_match_node()
989 return dev ? dev_to_virtio(dev)->priv : NULL; in viommu_get_by_fwnode()
999 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); in viommu_probe_device()
1001 return ERR_PTR(-ENODEV); in viommu_probe_device()
1005 return ERR_PTR(-ENOMEM); in viommu_probe_device()
1007 vdev->dev = dev; in viommu_probe_device()
1008 vdev->viommu = viommu; in viommu_probe_device()
1009 INIT_LIST_HEAD(&vdev->resv_regions); in viommu_probe_device()
1012 if (viommu->probe_size) { in viommu_probe_device()
1019 return &viommu->iommu; in viommu_probe_device()
1022 iommu_put_resv_regions(dev, &vdev->resv_regions); in viommu_probe_device()
1033 iommu_put_resv_regions(dev, &vdev->resv_regions); in viommu_release_device()
1048 return iommu_fwspec_add_ids(dev, args->args, 1); in viommu_of_xlate()
1086 struct virtio_device *vdev = dev_to_virtio(viommu->dev); in viommu_init_vqs()
1092 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, in viommu_init_vqs()
1101 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; in viommu_fill_evtq()
1102 size_t nr_evts = vq->num_free; in viommu_fill_evtq()
1104 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, in viommu_fill_evtq()
1107 return -ENOMEM; in viommu_fill_evtq()
1121 struct device *parent_dev = vdev->dev.parent; in viommu_probe()
1123 struct device *dev = &vdev->dev; in viommu_probe()
1125 u64 input_end = -1UL; in viommu_probe()
1130 return -ENODEV; in viommu_probe()
1134 return -ENOMEM; in viommu_probe()
1136 spin_lock_init(&viommu->request_lock); in viommu_probe()
1137 ida_init(&viommu->domain_ids); in viommu_probe()
1138 viommu->dev = dev; in viommu_probe()
1139 viommu->vdev = vdev; in viommu_probe()
1140 INIT_LIST_HEAD(&viommu->requests); in viommu_probe()
1147 &viommu->pgsize_bitmap); in viommu_probe()
1149 if (!viommu->pgsize_bitmap) { in viommu_probe()
1150 ret = -EINVAL; in viommu_probe()
1154 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; in viommu_probe()
1155 viommu->last_domain = ~0U; in viommu_probe()
1168 &viommu->first_domain); in viommu_probe()
1172 &viommu->last_domain); in viommu_probe()
1176 &viommu->probe_size); in viommu_probe()
1178 viommu->geometry = (struct iommu_domain_geometry) { in viommu_probe()
1185 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; in viommu_probe()
1187 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; in viommu_probe()
1196 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", in viommu_probe()
1201 iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); in viommu_probe()
1203 vdev->priv = viommu; in viommu_probe()
1206 order_base_2(viommu->geometry.aperture_end)); in viommu_probe()
1207 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); in viommu_probe()
1212 vdev->config->del_vqs(vdev); in viommu_probe()
1219 struct viommu_dev *viommu = vdev->priv; in viommu_remove()
1221 iommu_device_sysfs_remove(&viommu->iommu); in viommu_remove()
1222 iommu_device_unregister(&viommu->iommu); in viommu_remove()
1226 vdev->config->del_vqs(vdev); in viommu_remove()
1228 dev_info(&vdev->dev, "device removed\n"); in viommu_remove()
1233 dev_warn(&vdev->dev, "config changed\n"); in viommu_config_changed()
1249 MODULE_DEVICE_TABLE(virtio, id_table);
1263 MODULE_DESCRIPTION("Virtio IOMMU driver");
1264 MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");