Lines Matching full:viommu (drivers/iommu/virtio-iommu.c)
64 struct viommu_dev *viommu; member
65 struct mutex mutex; /* protects viommu pointer */
78 struct viommu_dev *viommu; member
136 static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, in viommu_get_write_desc_offset() argument
143 return len - viommu->probe_size - tail_size; in viommu_get_write_desc_offset()
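
The return at line 143 is the probe-specific branch of a helper that splits every request buffer into a device-readable head and a device-writable tail. A minimal sketch of the whole helper, reconstructed around that line; the head/tail structures come from include/uapi/linux/virtio_iommu.h, and the non-probe branch is inferred from that layout rather than quoted:

static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
                                          void *buf, size_t len)
{
        size_t tail_size = sizeof(struct virtio_iommu_req_tail);

        /*
         * Probe replies carry probe_size bytes of properties before
         * the tail, so the device-writable region starts earlier.
         */
        if (((struct virtio_iommu_req_head *)buf)->type ==
            VIRTIO_IOMMU_T_PROBE)
                return len - viommu->probe_size - tail_size;

        return len - tail_size;
}
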
154 static int __viommu_sync_req(struct viommu_dev *viommu) in __viommu_sync_req() argument
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_sync_req()
161 assert_spin_locked(&viommu->request_lock); in __viommu_sync_req()
165 while (!list_empty(&viommu->requests)) { in __viommu_sync_req()
187 static int viommu_sync_req(struct viommu_dev *viommu) in viommu_sync_req() argument
192 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_sync_req()
193 ret = __viommu_sync_req(viommu); in viommu_sync_req()
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_sync_req()
196 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_sync_req()
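
viommu_sync_req() (lines 187-196) is just the locked wrapper; the draining happens in __viommu_sync_req() with request_lock held. A sketch of that loop, assuming a struct viommu_request that carries the buffer, its length, and an optional writeback pointer; those field names are assumptions based on the surrounding code, not verbatim source:

static int __viommu_sync_req(struct viommu_dev *viommu)
{
        struct viommu_request *req;
        struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
        unsigned int len;

        assert_spin_locked(&viommu->request_lock);

        virtqueue_kick(vq);

        /* Poll until every queued request has been consumed */
        while (!list_empty(&viommu->requests)) {
                req = virtqueue_get_buf(vq, &len);
                if (!req)
                        continue;

                /* Copy the device-written tail back to the caller */
                if (req->writeback && len == req->len - req->write_offset)
                        memcpy(req->writeback,
                               req->buf + req->write_offset, len);

                list_del(&req->list);
                kfree(req);
        }

        return 0;
}
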
217 static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len, in __viommu_add_req() argument
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_add_req()
227 assert_spin_locked(&viommu->request_lock); in __viommu_add_req()
229 write_offset = viommu_get_write_desc_offset(viommu, buf, len); in __viommu_add_req()
250 if (!__viommu_sync_req(viommu)) in __viommu_add_req()
256 list_add_tail(&req->list, &viommu->requests); in __viommu_add_req()
264 static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len) in viommu_add_req() argument
269 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_add_req()
270 ret = __viommu_add_req(viommu, buf, len, false); in viommu_add_req()
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret); in viommu_add_req()
273 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_add_req()
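
__viommu_add_req() queues a request without kicking the virtqueue. The notable detail, visible at line 250, is the queue-full fallback: when virtqueue_add_sgs() returns -ENOSPC, the driver drains completed requests with __viommu_sync_req() and retries once. A condensed sketch using the same head/tail split as above:

static int __viommu_add_req(struct viommu_dev *viommu, void *buf,
                            size_t len, bool writeback)
{
        int ret;
        off_t write_offset;
        struct viommu_request *req;
        struct scatterlist top_sg, bottom_sg;
        struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
        struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

        assert_spin_locked(&viommu->request_lock);

        write_offset = viommu_get_write_desc_offset(viommu, buf, len);
        if (write_offset <= 0)
                return -EINVAL;

        req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
        if (!req)
                return -ENOMEM;

        req->len = len;
        if (writeback) {
                req->writeback = buf + write_offset;
                req->write_offset = write_offset;
        }
        memcpy(&req->buf, buf, write_offset);

        /* Device-readable head, device-writable tail */
        sg_init_one(&top_sg, req->buf, write_offset);
        sg_init_one(&bottom_sg, req->buf + write_offset,
                    len - write_offset);

        ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                /* The ring is full: reap completions and retry once */
                if (!__viommu_sync_req(viommu))
                        ret = virtqueue_add_sgs(vq, sg, 1, 1, req,
                                                GFP_ATOMIC);
        }
        if (ret) {
                kfree(req);
                return ret;
        }

        list_add_tail(&req->list, &viommu->requests);
        return 0;
}
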
282 static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf, in viommu_send_req_sync() argument
288 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_send_req_sync()
290 ret = __viommu_add_req(viommu, buf, len, true); in viommu_send_req_sync()
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret); in viommu_send_req_sync()
296 ret = __viommu_sync_req(viommu); in viommu_send_req_sync()
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_send_req_sync()
304 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_send_req_sync()
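
viommu_send_req_sync() combines both primitives under a single lock acquisition, so the device-written tail is valid as soon as the call returns. A sketch reconstructed from lines 282-304:

static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
                                size_t len)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&viommu->request_lock, flags);

        ret = __viommu_add_req(viommu, buf, len, true);
        if (ret) {
                dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
                goto out_unlock;
        }

        ret = __viommu_sync_req(viommu);
        if (ret)
                dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);

out_unlock:
        spin_unlock_irqrestore(&viommu->request_lock, flags);
        return ret;
}
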
454 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_replay_mappings()
514 static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) in viommu_probe_endpoint() argument
528 probe_len = sizeof(*probe) + viommu->probe_size + in viommu_probe_endpoint()
541 ret = viommu_send_req_sync(viommu, probe, probe_len); in viommu_probe_endpoint()
549 cur < viommu->probe_size) { in viommu_probe_endpoint()
557 dev_err(dev, "unknown viommu prop 0x%x\n", type); in viommu_probe_endpoint()
561 dev_err(dev, "failed to parse viommu prop 0x%x\n", type); in viommu_probe_endpoint()
564 if (cur >= viommu->probe_size) in viommu_probe_endpoint()
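
The probe reply (lines 514-564) is a sequence of type/length property records. A standalone sketch of the walk over those records; struct virtio_iommu_probe_property and the PROBE_T constants are from include/uapi/linux/virtio_iommu.h, while handle_resv_mem() is a hypothetical stand-in for the driver's reserved-region handler:

/* Hypothetical handler for RESV_MEM property records */
static int handle_resv_mem(struct device *dev, void *prop, size_t len);

static int viommu_parse_probe_props(struct device *dev, void *props,
                                    size_t probe_size)
{
        size_t cur = 0;
        struct virtio_iommu_probe_property *prop = props;
        u16 type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

        while (type != VIRTIO_IOMMU_PROBE_T_NONE && cur < probe_size) {
                size_t len = le16_to_cpu(prop->length) + sizeof(*prop);
                int ret = 0;

                switch (type) {
                case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
                        ret = handle_resv_mem(dev, prop, len);
                        break;
                default:
                        dev_err(dev, "unknown viommu prop 0x%x\n", type);
                }

                if (ret)
                        dev_err(dev, "failed to parse viommu prop 0x%x\n",
                                type);

                cur += len;
                if (cur >= probe_size)
                        break;

                /* Advance to the next type/length record */
                prop = props + cur;
                type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
        }

        return 0;
}
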
576 static int viommu_fault_handler(struct viommu_dev *viommu, in viommu_fault_handler() argument
601 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", in viommu_fault_handler()
607 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", in viommu_fault_handler()
618 struct viommu_dev *viommu = vq->vdev->priv; in viommu_event_handler() local
622 dev_err(viommu->dev, in viommu_event_handler()
626 viommu_fault_handler(viommu, &evt->fault); in viommu_event_handler()
632 dev_err(viommu->dev, "could not add event buffer\n"); in viommu_event_handler()
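
The event queue handler (lines 618-632) reaps fault records and immediately re-posts each buffer so the device never runs out of descriptors to report into. A sketch; treating struct viommu_event as a union of a 32-bit head and the fault record, and filtering reserved event types with a mask, are assumptions about the event layout:

static void viommu_event_handler(struct virtqueue *vq)
{
        int ret;
        unsigned int len;
        struct scatterlist sg[1];
        struct viommu_event *evt;
        struct viommu_dev *viommu = vq->vdev->priv;

        while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
                if (len > sizeof(*evt))
                        dev_err(viommu->dev,
                                "invalid event buffer (len %u != %zu)\n",
                                len, sizeof(*evt));
                else if (!(evt->head & VIOMMU_FAULT_RESV_MASK))
                        viommu_fault_handler(viommu, &evt->fault);

                /* Re-post the buffer so the device can reuse it */
                sg_init_one(sg, evt, sizeof(*evt));
                ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
                if (ret)
                        dev_err(viommu->dev, "could not add event buffer\n");
        }

        virtqueue_kick(vq);
}
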
665 struct viommu_dev *viommu = vdev->viommu; in viommu_domain_finalise() local
668 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); in viommu_domain_finalise()
676 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, in viommu_domain_finalise()
677 viommu->last_domain, GFP_KERNEL); in viommu_domain_finalise()
683 domain->pgsize_bitmap = viommu->pgsize_bitmap; in viommu_domain_finalise()
684 domain->geometry = viommu->geometry; in viommu_domain_finalise()
686 vdomain->map_flags = viommu->map_flags; in viommu_domain_finalise()
687 vdomain->viommu = viommu; in viommu_domain_finalise()
690 if (virtio_has_feature(viommu->vdev, in viommu_domain_finalise()
698 ida_free(&viommu->domain_ids, vdomain->id); in viommu_domain_finalise()
699 vdomain->viommu = NULL; in viommu_domain_finalise()
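
Domain finalisation (lines 665-699) allocates an ID from the device-advertised range and copies per-device capabilities into the domain. A condensed sketch; the identity-domain branch is reconstructed from the VIRTIO_IOMMU_F_BYPASS_CONFIG check at line 690 and the rollback at lines 698-699, with viommu_domain_map_identity() standing in for however the driver emulates bypass with full mappings:

static int viommu_domain_finalise(struct viommu_endpoint *vdev,
                                  struct iommu_domain *domain)
{
        int ret;
        unsigned long viommu_page_size;
        struct viommu_dev *viommu = vdev->viommu;
        struct viommu_domain *vdomain = to_viommu_domain(domain);

        /* The smallest device granule must fit in a CPU page */
        viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
        if (viommu_page_size > PAGE_SIZE)
                return -ENODEV;

        ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
                              viommu->last_domain, GFP_KERNEL);
        if (ret < 0)
                return ret;

        vdomain->id = (unsigned int)ret;

        domain->pgsize_bitmap = viommu->pgsize_bitmap;
        domain->geometry = viommu->geometry;

        vdomain->map_flags = viommu->map_flags;
        vdomain->viommu = viommu;

        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                if (virtio_has_feature(viommu->vdev,
                                       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
                        vdomain->bypass = true;
                        return 0;
                }

                /* No bypass support: emulate identity with mappings */
                ret = viommu_domain_map_identity(vdev, vdomain);
                if (ret) {
                        ida_free(&viommu->domain_ids, vdomain->id);
                        vdomain->viommu = NULL;
                        return ret;
                }
        }

        return 0;
}
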
714 if (vdomain->viommu) in viommu_domain_free()
715 ida_free(&vdomain->viommu->domain_ids, vdomain->id); in viommu_domain_free()
730 if (!vdomain->viommu) { in viommu_attach_dev()
732 * Properly initialize the domain now that we know which viommu in viommu_attach_dev()
736 } else if (vdomain->viommu != vdev->viommu) { in viommu_attach_dev()
770 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); in viommu_attach_dev()
808 WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req))); in viommu_detach_dev()
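
Attach and detach are ordinary synchronous requests. A sketch of the loop behind line 770, factored into a hypothetical helper: one VIRTIO_IOMMU_T_ATTACH request per endpoint ID in the fwspec, with the domain ID filled in; detach (line 808) sends VIRTIO_IOMMU_T_DETACH the same way. Field names are from include/uapi/linux/virtio_iommu.h:

static int viommu_attach_endpoints(struct viommu_domain *vdomain,
                                   struct iommu_fwspec *fwspec)
{
        int i, ret;
        struct virtio_iommu_req_attach req = {
                .head.type = VIRTIO_IOMMU_T_ATTACH,
                .domain    = cpu_to_le32(vdomain->id),
        };

        /* One request per endpoint ID behind this device */
        for (i = 0; i < fwspec->num_ids; i++) {
                req.endpoint = cpu_to_le32(fwspec->ids[i]);

                ret = viommu_send_req_sync(vdomain->viommu, &req,
                                           sizeof(req));
                if (ret)
                        return ret;
        }

        return 0;
}
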
846 ret = viommu_add_req(vdomain->viommu, &map, sizeof(map)); in viommu_map_pages()
883 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); in viommu_unmap_pages()
912 viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync()
921 * May be called before the viommu is initialized including in viommu_iotlb_sync_map()
926 return viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync_map()
934 * May be called before the viommu is initialized including in viommu_flush_iotlb_all()
939 viommu_sync_req(vdomain->viommu); in viommu_flush_iotlb_all()
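
map_pages and unmap_pages only queue requests via viommu_add_req(); the virtqueue is not kicked until the IOMMU core calls iotlb_sync (line 912), so a burst of map calls shares a single kick. A sketch of the MAP request built at line 846, wrapped in a hypothetical helper; iova, paddr, size and flags are illustrative parameters:

static int viommu_queue_map(struct viommu_domain *vdomain, u64 iova,
                            u64 paddr, u64 size, u32 flags)
{
        struct virtio_iommu_req_map map = {
                .head.type  = VIRTIO_IOMMU_T_MAP,
                .domain     = cpu_to_le32(vdomain->id),
                .virt_start = cpu_to_le64(iova),
                .virt_end   = cpu_to_le64(iova + size - 1),
                .phys_start = cpu_to_le64(paddr),
                .flags      = cpu_to_le32(flags),
        };

        /* Queued only; viommu_iotlb_sync() kicks the queue later */
        return viommu_add_req(vdomain->viommu, &map, sizeof(map));
}
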
996 struct viommu_dev *viommu = NULL; in viommu_probe_device() local
999 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); in viommu_probe_device()
1000 if (!viommu) in viommu_probe_device()
1008 vdev->viommu = viommu; in viommu_probe_device()
1012 if (viommu->probe_size) { in viommu_probe_device()
1014 ret = viommu_probe_endpoint(viommu, dev); in viommu_probe_device()
1019 return &viommu->iommu; in viommu_probe_device()
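
probe_device (lines 996-1019) resolves the viommu instance from the firmware node recorded at bus scan time, then optionally asks the device about the endpoint. A condensed sketch with error unwinding elided; vdev fields other than the ones visible at lines 1008 and 1012 are assumptions:

static struct iommu_device *viommu_probe_device(struct device *dev)
{
        int ret;
        struct viommu_endpoint *vdev;
        struct viommu_dev *viommu;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
        if (!viommu)
                return ERR_PTR(-ENODEV);

        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
        if (!vdev)
                return ERR_PTR(-ENOMEM);

        vdev->dev = dev;
        vdev->viommu = viommu;
        dev_iommu_priv_set(dev, vdev);

        /* Non-zero probe_size means VIRTIO_IOMMU_F_PROBE was offered */
        if (viommu->probe_size) {
                ret = viommu_probe_endpoint(viommu, dev);
                if (ret) {
                        kfree(vdev);
                        return ERR_PTR(ret);
                }
        }

        return &viommu->iommu;
}
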
1084 static int viommu_init_vqs(struct viommu_dev *viommu) in viommu_init_vqs() argument
1086 struct virtio_device *vdev = dev_to_virtio(viommu->dev); in viommu_init_vqs()
1092 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, in viommu_init_vqs()
1096 static int viommu_fill_evtq(struct viommu_dev *viommu) in viommu_fill_evtq() argument
1101 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; in viommu_fill_evtq()
1104 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, in viommu_fill_evtq()
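
The driver uses two virtqueues; viommu_init_vqs() (line 1084) registers viommu_event_handler as the event-queue callback, and viommu_fill_evtq() then pre-posts one event buffer per free descriptor so faults can be delivered at any time. A sketch reconstructed around lines 1096-1104:

static int viommu_fill_evtq(struct viommu_dev *viommu)
{
        int i, ret;
        struct scatterlist sg[1];
        struct viommu_event *evts;
        struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
        size_t nr_evts = vq->num_free;

        viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
                                                 sizeof(*evts), GFP_KERNEL);
        if (!evts)
                return -ENOMEM;

        /* Hand every buffer to the device up front */
        for (i = 0; i < nr_evts; i++) {
                sg_init_one(sg, &evts[i], sizeof(*evts));
                ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}
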
1122 struct viommu_dev *viommu = NULL; in viommu_probe() local
1132 viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL); in viommu_probe()
1133 if (!viommu) in viommu_probe()
1136 spin_lock_init(&viommu->request_lock); in viommu_probe()
1137 ida_init(&viommu->domain_ids); in viommu_probe()
1138 viommu->dev = dev; in viommu_probe()
1139 viommu->vdev = vdev; in viommu_probe()
1140 INIT_LIST_HEAD(&viommu->requests); in viommu_probe()
1142 ret = viommu_init_vqs(viommu); in viommu_probe()
1147 &viommu->pgsize_bitmap); in viommu_probe()
1149 if (!viommu->pgsize_bitmap) { in viommu_probe()
1154 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; in viommu_probe()
1155 viommu->last_domain = ~0U; in viommu_probe()
1168 &viommu->first_domain); in viommu_probe()
1172 &viommu->last_domain); in viommu_probe()
1176 &viommu->probe_size); in viommu_probe()
1178 viommu->geometry = (struct iommu_domain_geometry) { in viommu_probe()
1185 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; in viommu_probe()
1187 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; in viommu_probe()
1192 ret = viommu_fill_evtq(viommu); in viommu_probe()
1196 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", in viommu_probe()
1201 iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); in viommu_probe()
1203 vdev->priv = viommu; in viommu_probe()
1206 order_base_2(viommu->geometry.aperture_end)); in viommu_probe()
1207 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); in viommu_probe()
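
Between vq setup and registration, probe reads the little-endian config space; the defaults at lines 1154-1155 are only overridden when the matching feature bit was negotiated (lines 1168-1185). A sketch of those reads, factored into a hypothetical helper; virtio_cread_le()/virtio_cread_le_feature() are the stock virtio config accessors, and the field names follow struct virtio_iommu_config:

static int viommu_read_config(struct virtio_device *vdev,
                              struct viommu_dev *viommu)
{
        u64 input_start = 0;
        u64 input_end = ~0ULL;

        virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
                        &viommu->pgsize_bitmap);
        if (!viommu->pgsize_bitmap)
                return -EINVAL;

        /* Defaults, possibly narrowed by negotiated features below */
        viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ |
                            VIRTIO_IOMMU_MAP_F_WRITE;
        viommu->first_domain = 0;
        viommu->last_domain = ~0U;

        virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
                                struct virtio_iommu_config,
                                input_range.start, &input_start);
        virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
                                struct virtio_iommu_config,
                                input_range.end, &input_end);
        virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
                                struct virtio_iommu_config,
                                domain_range.start, &viommu->first_domain);
        virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
                                struct virtio_iommu_config,
                                domain_range.end, &viommu->last_domain);
        virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
                                struct virtio_iommu_config,
                                probe_size, &viommu->probe_size);

        viommu->geometry = (struct iommu_domain_geometry) {
                .aperture_start = input_start,
                .aperture_end   = input_end,
                .force_aperture = true,
        };

        if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
                viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

        return 0;
}
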
1219 struct viommu_dev *viommu = vdev->priv; in viommu_remove() local
1221 iommu_device_sysfs_remove(&viommu->iommu); in viommu_remove()
1222 iommu_device_unregister(&viommu->iommu); in viommu_remove()