Lines matching refs: vdomain. Uses of struct viommu_domain in the Linux virtio-iommu driver (drivers/iommu/virtio-iommu.c). Each entry gives the source line number, the matching code, and the enclosing function, plus vdomain's role there (member, argument, or local). A condensed sketch of each function follows its group of references.
79 struct viommu_domain *vdomain; member
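The line-79 hit is a member: each endpoint keeps a back-pointer to the domain it is attached to. Every other hit goes through struct viommu_domain itself. A minimal layout sketch implied by the references below; the field set is taken from those lines, but the ordering, the viommu_endpoint details, and the resv_regions list are assumptions:

    /* Layout sketch only; field order and the endpoint struct are assumed. */
    #include <linux/kernel.h>
    #include <linux/iommu.h>
    #include <linux/interval_tree.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    struct viommu_dev;                              /* opaque here */

    struct viommu_domain {
            struct iommu_domain     domain;         /* embedded core domain */
            struct viommu_dev       *viommu;        /* set once finalised */
            struct mutex            mutex;          /* serialises first attach */
            unsigned int            id;             /* ID sent to the device */
            u32                     map_flags;      /* MAP flags device accepts */
            spinlock_t              mappings_lock;  /* protects the tree below */
            struct rb_root_cached   mappings;       /* IOVA interval tree */
            unsigned long           nr_endpoints;   /* attached endpoints */
            bool                    bypass;         /* identity domain */
    };

    struct viommu_endpoint {
            struct viommu_dev       *viommu;
            struct viommu_domain    *vdomain;       /* the member at line 79 */
            struct list_head        resv_regions;   /* assumed: reserved regions */
    };

    /* Recover the container from the embedded iommu_domain. */
    static struct viommu_domain *to_viommu_domain(struct iommu_domain *domain)
    {
            return container_of(domain, struct viommu_domain, domain);
    }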
313 static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end, in viommu_add_mapping() argument
328 spin_lock_irqsave(&vdomain->mappings_lock, irqflags); in viommu_add_mapping()
329 interval_tree_insert(&mapping->iova, &vdomain->mappings); in viommu_add_mapping()
330 spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags); in viommu_add_mapping()
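viommu_add_mapping() records an IOVA range in the domain's interval tree so it can be replayed on a later attach. A condensed sketch built around lines 328-330; the viommu_mapping layout and the GFP_ATOMIC choice are assumptions:

    #include <linux/interval_tree.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct viommu_mapping {
            phys_addr_t                     paddr;
            struct interval_tree_node       iova;   /* .start/.last = IOVA range */
            u32                             flags;
    };

    static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova,
                                  u64 end, phys_addr_t paddr, u32 flags)
    {
            unsigned long irqflags;
            struct viommu_mapping *mapping;

            /* Assumed: atomic allocation, since callers may hold locks. */
            mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
            if (!mapping)
                    return -ENOMEM;

            mapping->paddr = paddr;
            mapping->iova.start = iova;
            mapping->iova.last = end;               /* inclusive upper bound */
            mapping->flags = flags;

            spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
            interval_tree_insert(&mapping->iova, &vdomain->mappings);
            spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

            return 0;
    }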
344 static size_t viommu_del_mappings(struct viommu_domain *vdomain, in viommu_del_mappings() argument
352 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_del_mappings()
353 next = interval_tree_iter_first(&vdomain->mappings, iova, end); in viommu_del_mappings()
369 interval_tree_remove(node, &vdomain->mappings); in viommu_del_mappings()
372 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_del_mappings()
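The delete path walks the tree under the same lock, fetching the next node before removing the current one so iteration stays valid. A sketch of the loop implied by lines 352-372, reusing struct viommu_mapping from the sketch above; the partial-overlap policy (skip mappings that start before the range) is an assumption:

    static size_t viommu_del_mappings(struct viommu_domain *vdomain,
                                      u64 iova, u64 end)
    {
            size_t unmapped = 0;
            unsigned long flags;
            struct viommu_mapping *mapping;
            struct interval_tree_node *node, *next;

            spin_lock_irqsave(&vdomain->mappings_lock, flags);
            next = interval_tree_iter_first(&vdomain->mappings, iova, end);
            while (next) {
                    node = next;
                    mapping = container_of(node, struct viommu_mapping, iova);
                    /* Grab the successor before unlinking the current node. */
                    next = interval_tree_iter_next(node, iova, end);

                    /* Assumed: only remove mappings starting inside the range. */
                    if (mapping->iova.start < iova)
                            continue;

                    unmapped += mapping->iova.last - mapping->iova.start + 1;
                    interval_tree_remove(node, &vdomain->mappings);
                    kfree(mapping);
            }
            spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

            return unmapped;
    }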
382 struct viommu_domain *vdomain) in viommu_domain_map_identity() argument
386 u64 iova = vdomain->domain.geometry.aperture_start; in viommu_domain_map_identity()
387 u64 limit = vdomain->domain.geometry.aperture_end; in viommu_domain_map_identity()
389 unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap); in viommu_domain_map_identity()
403 ret = viommu_add_mapping(vdomain, iova, resv_start - 1, in viommu_domain_map_identity()
415 ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova, in viommu_domain_map_identity()
422 viommu_del_mappings(vdomain, 0, iova); in viommu_domain_map_identity()
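viommu_domain_map_identity() emulates an identity domain by mapping the whole aperture flat while carving out reserved regions; on failure, line 422 deletes everything added so far, since [0, iova) covers exactly the ranges already inserted. A sketch assuming a list of struct iommu_resv_region hangs off the endpoint, with granule alignment reconstructed from line 389:

    static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
                                          struct viommu_domain *vdomain)
    {
            int ret;
            struct iommu_resv_region *resv;
            u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
            u64 iova = vdomain->domain.geometry.aperture_start;
            u64 limit = vdomain->domain.geometry.aperture_end;
            /* Carve at the smallest supported page size (line 389). */
            unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

            iova = ALIGN(iova, granule);
            limit = ALIGN_DOWN(limit + 1, granule) - 1;

            list_for_each_entry(resv, &vdev->resv_regions, list) {
                    u64 resv_start = ALIGN_DOWN(resv->start, granule);
                    u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

                    if (resv_start > iova) {
                            /* Identity-map the gap below this reserved region. */
                            ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
                                                     (phys_addr_t)iova, flags);
                            if (ret)
                                    goto err_unmap;
                    }
                    iova = resv_end + 1;
            }

            /* Map whatever remains up to the end of the aperture. */
            ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
                                     flags);
            if (ret)
                    goto err_unmap;
            return 0;

    err_unmap:
            /* Tear down everything added so far (line 422). */
            viommu_del_mappings(vdomain, 0, iova);
            return ret;
    }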
433 static int viommu_replay_mappings(struct viommu_domain *vdomain) in viommu_replay_mappings() argument
441 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_replay_mappings()
442 node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); in viommu_replay_mappings()
447 .domain = cpu_to_le32(vdomain->id), in viommu_replay_mappings()
454 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_replay_mappings()
460 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_replay_mappings()
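Replay walks the whole tree (the [0, -1UL] query) and synchronously re-sends one MAP request per recorded mapping; it runs when the first endpoint attaches to a domain that already has mappings. A sketch close to the referenced lines, with the request layout taken from the virtio-iommu UAPI header:

    #include <uapi/linux/virtio_iommu.h>

    static int viommu_replay_mappings(struct viommu_domain *vdomain)
    {
            int ret = 0;
            unsigned long flags;
            struct viommu_mapping *mapping;
            struct interval_tree_node *node;
            struct virtio_iommu_req_map map;

            spin_lock_irqsave(&vdomain->mappings_lock, flags);
            node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
            while (node) {
                    mapping = container_of(node, struct viommu_mapping, iova);
                    map = (struct virtio_iommu_req_map) {
                            .head.type      = VIRTIO_IOMMU_T_MAP,
                            .domain         = cpu_to_le32(vdomain->id),
                            .virt_start     = cpu_to_le64(mapping->iova.start),
                            .virt_end       = cpu_to_le64(mapping->iova.last),
                            .phys_start     = cpu_to_le64(mapping->paddr),
                            .flags          = cpu_to_le32(mapping->flags),
                    };

                    /* Synchronous send: the driver's own request helper. */
                    ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
                    if (ret)
                            break;

                    node = interval_tree_iter_next(node, 0, -1UL);
            }
            spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

            return ret;
    }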
642 struct viommu_domain *vdomain; in viommu_domain_alloc() local
649 vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL); in viommu_domain_alloc()
650 if (!vdomain) in viommu_domain_alloc()
653 mutex_init(&vdomain->mutex); in viommu_domain_alloc()
654 spin_lock_init(&vdomain->mappings_lock); in viommu_domain_alloc()
655 vdomain->mappings = RB_ROOT_CACHED; in viommu_domain_alloc()
657 return &vdomain->domain; in viommu_domain_alloc()
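Allocation only sets up software state: the mutex, the lock, and an empty interval tree (RB_ROOT_CACHED); no device interaction happens here. A sketch matching lines 649-657, with a simplified signature:

    static struct iommu_domain *viommu_domain_alloc(unsigned type)
    {
            struct viommu_domain *vdomain;

            vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
            if (!vdomain)
                    return NULL;

            mutex_init(&vdomain->mutex);
            spin_lock_init(&vdomain->mappings_lock);
            vdomain->mappings = RB_ROOT_CACHED;     /* empty interval tree */

            /* Device binding is deferred to finalise, at first attach. */
            return &vdomain->domain;
    }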
666 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_domain_finalise() local
681 vdomain->id = (unsigned int)ret; in viommu_domain_finalise()
686 vdomain->map_flags = viommu->map_flags; in viommu_domain_finalise()
687 vdomain->viommu = viommu; in viommu_domain_finalise()
692 vdomain->bypass = true; in viommu_domain_finalise()
696 ret = viommu_domain_map_identity(vdev, vdomain); in viommu_domain_finalise()
698 ida_free(&viommu->domain_ids, vdomain->id); in viommu_domain_finalise()
699 vdomain->viommu = NULL; in viommu_domain_finalise()
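Finalise binds the domain to a concrete viommu instance and claims a domain ID from an IDA; if the identity emulation fails, it returns the ID and clears the back-pointer (lines 698-699) so the domain can be retried or freed cleanly. A sketch; first_domain, last_domain, and device_supports_bypass() are hypothetical stand-ins for the device's advertised ID range and bypass feature check:

    static int viommu_domain_finalise(struct viommu_endpoint *vdev,
                                      struct iommu_domain *domain)
    {
            int ret;
            struct viommu_dev *viommu = vdev->viommu;
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            /* Claim an ID from the range the device supports (hypothetical
             * first_domain/last_domain fields). */
            ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
                                  viommu->last_domain, GFP_KERNEL);
            if (ret < 0)
                    return ret;

            vdomain->id = (unsigned int)ret;
            vdomain->map_flags = viommu->map_flags;
            vdomain->viommu = viommu;

            if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                    if (device_supports_bypass(viommu)) {   /* hypothetical */
                            vdomain->bypass = true;
                            return 0;
                    }
                    /* No native bypass: emulate identity with real mappings. */
                    ret = viommu_domain_map_identity(vdev, vdomain);
                    if (ret) {
                            ida_free(&viommu->domain_ids, vdomain->id);
                            vdomain->viommu = NULL;
                            return ret;
                    }
            }
            return 0;
    }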
709 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_domain_free() local
712 viommu_del_mappings(vdomain, 0, ULLONG_MAX); in viommu_domain_free()
714 if (vdomain->viommu) in viommu_domain_free()
715 ida_free(&vdomain->viommu->domain_ids, vdomain->id); in viommu_domain_free()
717 kfree(vdomain); in viommu_domain_free()
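Teardown is the mirror image: drop all recorded mappings (identity domains still hold their full-aperture entries), return the ID only if finalise ever ran, then free. Lines 712-717 nearly verbatim:

    #include <linux/limits.h>

    static void viommu_domain_free(struct iommu_domain *domain)
    {
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            /* Free all remaining mappings; non-empty for identity domains. */
            viommu_del_mappings(vdomain, 0, ULLONG_MAX);

            if (vdomain->viommu)    /* only finalised domains hold an ID */
                    ida_free(&vdomain->viommu->domain_ids, vdomain->id);

            kfree(vdomain);
    }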
727 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_attach_dev() local
729 mutex_lock(&vdomain->mutex); in viommu_attach_dev()
730 if (!vdomain->viommu) { in viommu_attach_dev()
736 } else if (vdomain->viommu != vdev->viommu) { in viommu_attach_dev()
739 mutex_unlock(&vdomain->mutex); in viommu_attach_dev()
756 if (vdev->vdomain) in viommu_attach_dev()
757 vdev->vdomain->nr_endpoints--; in viommu_attach_dev()
761 .domain = cpu_to_le32(vdomain->id), in viommu_attach_dev()
764 if (vdomain->bypass) in viommu_attach_dev()
770 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); in viommu_attach_dev()
775 if (!vdomain->nr_endpoints) { in viommu_attach_dev()
780 ret = viommu_replay_mappings(vdomain); in viommu_attach_dev()
785 vdomain->nr_endpoints++; in viommu_attach_dev()
786 vdev->vdomain = vdomain; in viommu_attach_dev()
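Attach lazily finalises the domain under vdomain->mutex, rejects endpoints from a different viommu instance, sends an ATTACH request (with the BYPASS flag for identity domains), and replays recorded mappings when this is the domain's first endpoint. A sketch; the real function sends one request per endpoint ID in the device's fwspec, and ep_id here is a hypothetical stand-in:

    static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
    {
            int ret;
            struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
            struct viommu_domain *vdomain = to_viommu_domain(domain);
            struct virtio_iommu_req_attach req;

            mutex_lock(&vdomain->mutex);
            if (!vdomain->viommu)
                    ret = viommu_domain_finalise(vdev, domain); /* first attach */
            else if (vdomain->viommu != vdev->viommu)
                    ret = -EINVAL;  /* endpoint owned by another instance */
            else
                    ret = 0;
            mutex_unlock(&vdomain->mutex);
            if (ret)
                    return ret;

            /* Leaving the old domain is implicit; fix the bookkeeping. */
            if (vdev->vdomain)
                    vdev->vdomain->nr_endpoints--;

            req = (struct virtio_iommu_req_attach) {
                    .head.type      = VIRTIO_IOMMU_T_ATTACH,
                    .domain         = cpu_to_le32(vdomain->id),
                    .endpoint       = cpu_to_le32(ep_id),   /* hypothetical */
            };
            if (vdomain->bypass)
                    req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

            ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
            if (ret)
                    return ret;

            /* First endpoint: push mappings recorded before any attach. */
            if (!vdomain->nr_endpoints) {
                    ret = viommu_replay_mappings(vdomain);
                    if (ret)
                            return ret;
            }

            vdomain->nr_endpoints++;
            vdev->vdomain = vdomain;
            return 0;
    }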
795 struct viommu_domain *vdomain = vdev->vdomain; in viommu_detach_dev() local
798 if (!vdomain) in viommu_detach_dev()
803 .domain = cpu_to_le32(vdomain->id), in viommu_detach_dev()
810 vdomain->nr_endpoints--; in viommu_detach_dev()
811 vdev->vdomain = NULL; in viommu_detach_dev()
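Detach sends a DETACH request and undoes the bookkeeping from attach; an endpoint that never attached has a NULL vdomain and the function returns early. A sketch; ep_id is again a hypothetical stand-in for the endpoint's ID:

    static void viommu_detach_dev(struct viommu_endpoint *vdev)
    {
            struct virtio_iommu_req_detach req;
            struct viommu_domain *vdomain = vdev->vdomain;

            if (!vdomain)
                    return;         /* never attached */

            req = (struct virtio_iommu_req_detach) {
                    .head.type      = VIRTIO_IOMMU_T_DETACH,
                    .domain         = cpu_to_le32(vdomain->id),
                    .endpoint       = cpu_to_le32(ep_id),   /* hypothetical */
            };
            viommu_send_req_sync(vdev->viommu, &req, sizeof(req));

            vdomain->nr_endpoints--;
            vdev->vdomain = NULL;
    }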
823 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_map_pages() local
829 if (flags & ~vdomain->map_flags) in viommu_map_pages()
832 ret = viommu_add_mapping(vdomain, iova, end, paddr, flags); in viommu_map_pages()
836 if (vdomain->nr_endpoints) { in viommu_map_pages()
839 .domain = cpu_to_le32(vdomain->id), in viommu_map_pages()
846 ret = viommu_add_req(vdomain->viommu, &map, sizeof(map)); in viommu_map_pages()
848 viommu_del_mappings(vdomain, iova, end); in viommu_map_pages()
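Map always records the range locally first; the device only gets a MAP request once endpoints are attached (before that, replay delivers it), and a failed queue attempt rolls the local record back (line 848). A sketch with a simplified signature (the real callback takes pgsize/pgcount); the prot-to-flags translation is an assumption based on the virtio-iommu UAPI flag bits:

    static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                                phys_addr_t paddr, size_t size, int prot)
    {
            int ret = 0;
            u64 end = iova + size - 1;
            u32 flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
                        (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
                        (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            /* Refuse flags the device did not advertise. */
            if (flags & ~vdomain->map_flags)
                    return -EINVAL;

            /* Always record locally so a later attach can replay it. */
            ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
            if (ret)
                    return ret;

            /* Only tell the device once at least one endpoint is attached. */
            if (vdomain->nr_endpoints) {
                    struct virtio_iommu_req_map map = {
                            .head.type      = VIRTIO_IOMMU_T_MAP,
                            .domain         = cpu_to_le32(vdomain->id),
                            .virt_start     = cpu_to_le64(iova),
                            .virt_end       = cpu_to_le64(end),
                            .phys_start     = cpu_to_le64(paddr),
                            .flags          = cpu_to_le32(flags),
                    };

                    ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
                    if (ret)
                            viommu_del_mappings(vdomain, iova, end); /* undo */
            }
            return ret;
    }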
865 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_unmap_pages() local
868 unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); in viommu_unmap_pages()
873 if (!vdomain->nr_endpoints) in viommu_unmap_pages()
878 .domain = cpu_to_le32(vdomain->id), in viommu_unmap_pages()
883 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); in viommu_unmap_pages()
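Unmap drops the local records first and only notifies the device when something is attached; the UNMAP request is queued (viommu_add_req) rather than sent, relying on iotlb_sync to flush it. A sketch with a simplified signature:

    static size_t viommu_unmap_pages(struct iommu_domain *domain,
                                     unsigned long iova, size_t size)
    {
            int ret;
            size_t unmapped;
            struct virtio_iommu_req_unmap unmap;
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
            if (unmapped < size)
                    return 0;

            /* Nothing attached: the device never saw these mappings. */
            if (!vdomain->nr_endpoints)
                    return unmapped;

            unmap = (struct virtio_iommu_req_unmap) {
                    .head.type      = VIRTIO_IOMMU_T_UNMAP,
                    .domain         = cpu_to_le32(vdomain->id),
                    .virt_start     = cpu_to_le64(iova),
                    .virt_end       = cpu_to_le64(iova + unmapped - 1),
            };

            /* Queued, not sent: iotlb_sync() flushes the request later. */
            ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
            return ret ? 0 : unmapped;
    }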
894 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_iova_to_phys() local
896 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_iova_to_phys()
897 node = interval_tree_iter_first(&vdomain->mappings, iova, iova); in viommu_iova_to_phys()
902 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_iova_to_phys()
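iova_to_phys is a pure software lookup: a stabbing query ([iova, iova]) on the interval tree plus the offset into the found mapping, with no device round-trip. A sketch close to lines 896-902:

    static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
    {
            phys_addr_t paddr = 0;
            unsigned long flags;
            struct viommu_mapping *mapping;
            struct interval_tree_node *node;
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            spin_lock_irqsave(&vdomain->mappings_lock, flags);
            node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
            if (node) {
                    mapping = container_of(node, struct viommu_mapping, iova);
                    /* Offset within the mapping gives the physical address. */
                    paddr = mapping->paddr + (iova - mapping->iova.start);
            }
            spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

            return paddr;
    }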
910 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_iotlb_sync() local
912 viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync()
918 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_iotlb_sync_map() local
924 if (!vdomain->nr_endpoints) in viommu_iotlb_sync_map()
926 return viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync_map()
931 struct viommu_domain *vdomain = to_viommu_domain(domain); in viommu_flush_iotlb_all() local
937 if (!vdomain->nr_endpoints) in viommu_flush_iotlb_all()
939 viommu_sync_req(vdomain->viommu); in viommu_flush_iotlb_all()
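The three TLB hooks above share one pattern: map/unmap only queue requests, and the sync hooks flush the queue, with the two that can run on never-attached domains (where vdomain->viommu may still be NULL) bailing out when nr_endpoints is zero. A condensed sketch, with simplified signatures:

    static void viommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *gather)
    {
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            viommu_sync_req(vdomain->viommu);       /* flush queued UNMAPs */
    }

    static int viommu_iotlb_sync_map(struct iommu_domain *domain,
                                     unsigned long iova, size_t size)
    {
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            /* No endpoint attached: nothing queued, viommu may be NULL. */
            if (!vdomain->nr_endpoints)
                    return 0;
            return viommu_sync_req(vdomain->viommu);
    }

    static void viommu_flush_iotlb_all(struct iommu_domain *domain)
    {
            struct viommu_domain *vdomain = to_viommu_domain(domain);

            if (!vdomain->nr_endpoints)
                    return;
            viommu_sync_req(vdomain->viommu);
    }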