1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
24 #include <linux/pci-ats.h>
36 #include "dma-iommu.h"
37 #include "iommu-priv.h"
72 #define for_each_group_device(group, pos) \
73 list_for_each_entry(pos, &(group)->devices, list)
77 ssize_t (*show)(struct iommu_group *group, char *buf);
78 ssize_t (*store)(struct iommu_group *group,
79 const char *buf, size_t count);
84 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
97 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
101 struct iommu_group *group);
107 static int __iommu_device_set_domain(struct iommu_group *group,
111 static int __iommu_group_set_domain_internal(struct iommu_group *group,
114 static int __iommu_group_set_domain(struct iommu_group *group, in __iommu_group_set_domain() argument
117 return __iommu_group_set_domain_internal(group, new_domain, 0); in __iommu_group_set_domain()
119 static void __iommu_group_set_domain_nofail(struct iommu_group *group, in __iommu_group_set_domain_nofail() argument
123 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); in __iommu_group_set_domain_nofail()
126 static int iommu_setup_default_domain(struct iommu_group *group,
130 static ssize_t iommu_group_store_type(struct iommu_group *group,
131 const char *buf, size_t count);
132 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
134 static void __iommu_group_free_device(struct iommu_group *group,
169 * Use a function instead of an array here because the domain-type is a
170 * bit-field, so an array would waste memory.
202 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); in iommu_subsys_init()
223 return -ENOMEM; in iommu_subsys_init()
236 if (dev->iommu && dev->iommu->iommu_dev == data) in remove_iommu_group()
243 * iommu_device_register() - Register an IOMMU hardware instance
256 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) in iommu_device_register()
257 return -EINVAL; in iommu_device_register()
259 iommu->ops = ops; in iommu_device_register()
261 iommu->fwnode = dev_fwnode(hwdev); in iommu_device_register()
264 list_add_tail(&iommu->list, &iommu_device_list); in iommu_device_register()
281 list_del(&iommu->list); in iommu_device_unregister()
285 iommu_group_put(iommu->singleton_group); in iommu_device_unregister()
286 iommu->singleton_group = NULL; in iommu_device_unregister()
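/*
 * Illustrative sketch (not part of the original file): how an IOMMU driver's
 * probe path might register its hardware instance with the core. The
 * "example_iommu" structure and "example_iommu_ops" are hypothetical names;
 * the ops table would be filled in elsewhere by the driver.
 */
struct example_iommu {
	struct iommu_device iommu;
};

static const struct iommu_ops example_iommu_ops;

static int example_hw_probe(struct device *hwdev)
{
	struct example_iommu *data = devm_kzalloc(hwdev, sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* Publish the instance; the core will call ops->probe_device() for clients. */
	return iommu_device_register(&data->iommu, &example_iommu_ops, hwdev);
}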
312 iommu->ops = ops; in iommu_device_register_bus()
313 nb->notifier_call = iommu_bus_notifier; in iommu_device_register_bus()
319 list_add_tail(&iommu->list, &iommu_device_list); in iommu_device_register_bus()
334 struct dev_iommu *param = dev->iommu; in dev_iommu_get()
345 mutex_init(&param->lock); in dev_iommu_get()
346 dev->iommu = param; in dev_iommu_get()
352 struct dev_iommu *param = dev->iommu; in dev_iommu_free()
354 dev->iommu = NULL; in dev_iommu_free()
355 if (param->fwspec) { in dev_iommu_free()
356 fwnode_handle_put(param->fwspec->iommu_fwnode); in dev_iommu_free()
357 kfree(param->fwspec); in dev_iommu_free()
364 * actually has API ops, and don't want false positives from VFIO-only groups.
368 return dev->iommu && dev->iommu->iommu_dev; in dev_has_iommu()
381 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); in dev_iommu_get_max_pasids()
386 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); in dev_iommu_get_max_pasids()
394 dev->iommu->priv = priv; in dev_iommu_priv_set()
399 * Init the dev->iommu and dev->iommu_group in the struct device and get the
405 struct iommu_group *group; in iommu_init_device() local
409 return -ENOMEM; in iommu_init_device()
411 if (!try_module_get(ops->owner)) { in iommu_init_device()
412 ret = -EINVAL; in iommu_init_device()
416 iommu_dev = ops->probe_device(dev); in iommu_init_device()
421 dev->iommu->iommu_dev = iommu_dev; in iommu_init_device()
427 group = ops->device_group(dev); in iommu_init_device()
428 if (WARN_ON_ONCE(group == NULL)) in iommu_init_device()
429 group = ERR_PTR(-EINVAL); in iommu_init_device()
430 if (IS_ERR(group)) { in iommu_init_device()
431 ret = PTR_ERR(group); in iommu_init_device()
434 dev->iommu_group = group; in iommu_init_device()
436 dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); in iommu_init_device()
437 if (ops->is_attach_deferred) in iommu_init_device()
438 dev->iommu->attach_deferred = ops->is_attach_deferred(dev); in iommu_init_device()
444 if (ops->release_device) in iommu_init_device()
445 ops->release_device(dev); in iommu_init_device()
447 module_put(ops->owner); in iommu_init_device()
449 dev->iommu->iommu_dev = NULL; in iommu_init_device()
456 struct iommu_group *group = dev->iommu_group; in iommu_deinit_device() local
459 lockdep_assert_held(&group->mutex); in iommu_deinit_device()
461 iommu_device_unlink(dev->iommu->iommu_dev, dev); in iommu_deinit_device()
465 * If there are still other devices in the group, they are not affected in iommu_deinit_device()
480 if (!dev->iommu->attach_deferred && ops->release_domain) in iommu_deinit_device()
481 ops->release_domain->ops->attach_dev(ops->release_domain, dev); in iommu_deinit_device()
483 if (ops->release_device) in iommu_deinit_device()
484 ops->release_device(dev); in iommu_deinit_device()
487 * If this is the last driver to use the group then we must free the in iommu_deinit_device()
490 if (list_empty(&group->devices)) { in iommu_deinit_device()
491 if (group->default_domain) { in iommu_deinit_device()
492 iommu_domain_free(group->default_domain); in iommu_deinit_device()
493 group->default_domain = NULL; in iommu_deinit_device()
495 if (group->blocking_domain) { in iommu_deinit_device()
496 iommu_domain_free(group->blocking_domain); in iommu_deinit_device()
497 group->blocking_domain = NULL; in iommu_deinit_device()
499 group->domain = NULL; in iommu_deinit_device()
503 dev->iommu_group = NULL; in iommu_deinit_device()
504 module_put(ops->owner); in iommu_deinit_device()
513 struct iommu_group *group; in __iommu_probe_device() local
518 * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU in __iommu_probe_device()
519 * instances with non-NULL fwnodes, and client devices should have been in __iommu_probe_device()
523 * ops for probing, and thus cheekily co-opt the same mechanism. in __iommu_probe_device()
527 return -ENODEV; in __iommu_probe_device()
537 /* Device is probed already if in a group */ in __iommu_probe_device()
538 if (dev->iommu_group) in __iommu_probe_device()
545 group = dev->iommu_group; in __iommu_probe_device()
546 gdev = iommu_group_alloc_device(group, dev); in __iommu_probe_device()
547 mutex_lock(&group->mutex); in __iommu_probe_device()
557 list_add_tail(&gdev->list, &group->devices); in __iommu_probe_device()
558 WARN_ON(group->default_domain && !group->domain); in __iommu_probe_device()
559 if (group->default_domain) in __iommu_probe_device()
560 iommu_create_device_direct_mappings(group->default_domain, dev); in __iommu_probe_device()
561 if (group->domain) { in __iommu_probe_device()
562 ret = __iommu_device_set_domain(group, dev, group->domain, 0); in __iommu_probe_device()
565 } else if (!group->default_domain && !group_list) { in __iommu_probe_device()
566 ret = iommu_setup_default_domain(group, 0); in __iommu_probe_device()
569 } else if (!group->default_domain) { in __iommu_probe_device()
572 * to the caller by providing a de-duplicated list of groups in __iommu_probe_device()
575 if (list_empty(&group->entry)) in __iommu_probe_device()
576 list_add_tail(&group->entry, group_list); in __iommu_probe_device()
579 if (group->default_domain) in __iommu_probe_device()
582 mutex_unlock(&group->mutex); in __iommu_probe_device()
587 list_del(&gdev->list); in __iommu_probe_device()
588 __iommu_group_free_device(group, gdev); in __iommu_probe_device()
591 mutex_unlock(&group->mutex); in __iommu_probe_device()
592 iommu_group_put(group); in __iommu_probe_device()
609 if (ops->probe_finalize) in iommu_probe_device()
610 ops->probe_finalize(dev); in iommu_probe_device()
615 static void __iommu_group_free_device(struct iommu_group *group, in __iommu_group_free_device() argument
618 struct device *dev = grp_dev->dev; in __iommu_group_free_device()
620 sysfs_remove_link(group->devices_kobj, grp_dev->name); in __iommu_group_free_device()
621 sysfs_remove_link(&dev->kobj, "iommu_group"); in __iommu_group_free_device()
623 trace_remove_device_from_group(group->id, dev); in __iommu_group_free_device()
626 * If the group has become empty then ownership must have been in __iommu_group_free_device()
630 if (list_empty(&group->devices)) in __iommu_group_free_device()
631 WARN_ON(group->owner_cnt || in __iommu_group_free_device()
632 group->domain != group->default_domain); in __iommu_group_free_device()
634 kfree(grp_dev->name); in __iommu_group_free_device()
641 struct iommu_group *group = dev->iommu_group; in __iommu_group_remove_device() local
644 mutex_lock(&group->mutex); in __iommu_group_remove_device()
645 for_each_group_device(group, device) { in __iommu_group_remove_device()
646 if (device->dev != dev) in __iommu_group_remove_device()
649 list_del(&device->list); in __iommu_group_remove_device()
650 __iommu_group_free_device(group, device); in __iommu_group_remove_device()
654 dev->iommu_group = NULL; in __iommu_group_remove_device()
657 mutex_unlock(&group->mutex); in __iommu_group_remove_device()
663 iommu_group_put(group); in __iommu_group_remove_device()
668 struct iommu_group *group = dev->iommu_group; in iommu_release_device() local
670 if (group) in iommu_release_device()
674 if (dev->iommu) in iommu_release_device()
717 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_show() local
718 ssize_t ret = -EIO; in iommu_group_attr_show()
720 if (attr->show) in iommu_group_attr_show()
721 ret = attr->show(group, buf); in iommu_group_attr_show()
727 const char *buf, size_t count) in iommu_group_attr_store() argument
730 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_store() local
731 ssize_t ret = -EIO; in iommu_group_attr_store()
733 if (attr->store) in iommu_group_attr_store()
734 ret = attr->store(group, buf, count); in iommu_group_attr_store()
743 static int iommu_group_create_file(struct iommu_group *group, in iommu_group_create_file() argument
746 return sysfs_create_file(&group->kobj, &attr->attr); in iommu_group_create_file()
749 static void iommu_group_remove_file(struct iommu_group *group, in iommu_group_remove_file() argument
752 sysfs_remove_file(&group->kobj, &attr->attr); in iommu_group_remove_file()
755 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) in iommu_group_show_name() argument
757 return sysfs_emit(buf, "%s\n", group->name); in iommu_group_show_name()
761 * iommu_insert_resv_region - Insert a new region in the
775 nr = iommu_alloc_resv_region(new->start, new->length, in iommu_insert_resv_region()
776 new->prot, new->type, GFP_KERNEL); in iommu_insert_resv_region()
778 return -ENOMEM; in iommu_insert_resv_region()
782 if (nr->start < iter->start || in iommu_insert_resv_region()
783 (nr->start == iter->start && nr->type <= iter->type)) in iommu_insert_resv_region()
786 list_add_tail(&nr->list, &iter->list); in iommu_insert_resv_region()
788 /* Merge overlapping segments of type nr->type in @regions, if any */ in iommu_insert_resv_region()
790 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; in iommu_insert_resv_region()
793 if (iter->type != new->type) { in iommu_insert_resv_region()
794 list_move_tail(&iter->list, &stack); in iommu_insert_resv_region()
800 if (top->type == iter->type) in iommu_insert_resv_region()
803 list_move_tail(&iter->list, &stack); in iommu_insert_resv_region()
807 top_end = top->start + top->length - 1; in iommu_insert_resv_region()
809 if (iter->start > top_end + 1) { in iommu_insert_resv_region()
810 list_move_tail(&iter->list, &stack); in iommu_insert_resv_region()
812 top->length = max(top_end, iter_end) - top->start + 1; in iommu_insert_resv_region()
813 list_del(&iter->list); in iommu_insert_resv_region()
836 int iommu_get_group_resv_regions(struct iommu_group *group, in iommu_get_group_resv_regions() argument
842 mutex_lock(&group->mutex); in iommu_get_group_resv_regions()
843 for_each_group_device(group, device) { in iommu_get_group_resv_regions()
847 * Non-API groups still expose reserved_regions in sysfs, in iommu_get_group_resv_regions()
850 if (!dev_has_iommu(device->dev)) in iommu_get_group_resv_regions()
854 iommu_get_resv_regions(device->dev, &dev_resv_regions); in iommu_get_group_resv_regions()
856 iommu_put_resv_regions(device->dev, &dev_resv_regions); in iommu_get_group_resv_regions()
860 mutex_unlock(&group->mutex); in iommu_get_group_resv_regions()
865 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, in iommu_group_show_resv_regions() argument
873 iommu_get_group_resv_regions(group, &group_resv_regions); in iommu_group_show_resv_regions()
877 (long long)region->start, in iommu_group_show_resv_regions()
878 (long long)(region->start + in iommu_group_show_resv_regions()
879 region->length - 1), in iommu_group_show_resv_regions()
880 iommu_group_resv_type_string[region->type]); in iommu_group_show_resv_regions()
887 static ssize_t iommu_group_show_type(struct iommu_group *group, in iommu_group_show_type() argument
892 mutex_lock(&group->mutex); in iommu_group_show_type()
893 if (group->default_domain) { in iommu_group_show_type()
894 switch (group->default_domain->type) { in iommu_group_show_type()
908 type = "DMA-FQ"; in iommu_group_show_type()
912 mutex_unlock(&group->mutex); in iommu_group_show_type()
927 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_release() local
929 pr_debug("Releasing group %d\n", group->id); in iommu_group_release()
931 if (group->iommu_data_release) in iommu_group_release()
932 group->iommu_data_release(group->iommu_data); in iommu_group_release()
934 ida_free(&iommu_group_ida, group->id); in iommu_group_release()
937 WARN_ON(group->default_domain); in iommu_group_release()
938 WARN_ON(group->blocking_domain); in iommu_group_release()
940 kfree(group->name); in iommu_group_release()
941 kfree(group); in iommu_group_release()
950 * iommu_group_alloc - Allocate a new group
953 * group. The iommu group represents the minimum granularity of the iommu.
955 * group in order to hold the group until devices are added. Use
956 * iommu_group_put() to release this extra reference count, allowing the
957 * group to be automatically reclaimed once it has no devices or external
962 struct iommu_group *group; in iommu_group_alloc() local
965 group = kzalloc(sizeof(*group), GFP_KERNEL); in iommu_group_alloc()
966 if (!group) in iommu_group_alloc()
967 return ERR_PTR(-ENOMEM); in iommu_group_alloc()
969 group->kobj.kset = iommu_group_kset; in iommu_group_alloc()
970 mutex_init(&group->mutex); in iommu_group_alloc()
971 INIT_LIST_HEAD(&group->devices); in iommu_group_alloc()
972 INIT_LIST_HEAD(&group->entry); in iommu_group_alloc()
973 xa_init(&group->pasid_array); in iommu_group_alloc()
977 kfree(group); in iommu_group_alloc()
980 group->id = ret; in iommu_group_alloc()
982 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, in iommu_group_alloc()
983 NULL, "%d", group->id); in iommu_group_alloc()
985 kobject_put(&group->kobj); in iommu_group_alloc()
989 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); in iommu_group_alloc()
990 if (!group->devices_kobj) { in iommu_group_alloc()
991 kobject_put(&group->kobj); /* triggers .release & free */ in iommu_group_alloc()
992 return ERR_PTR(-ENOMEM); in iommu_group_alloc()
996 * The devices_kobj holds a reference on the group kobject, so in iommu_group_alloc()
997 * as long as that exists so will the group. We can therefore in iommu_group_alloc()
1000 kobject_put(&group->kobj); in iommu_group_alloc()
1002 ret = iommu_group_create_file(group, in iommu_group_alloc()
1005 kobject_put(group->devices_kobj); in iommu_group_alloc()
1009 ret = iommu_group_create_file(group, &iommu_group_attr_type); in iommu_group_alloc()
1011 kobject_put(group->devices_kobj); in iommu_group_alloc()
1015 pr_debug("Allocated group %d\n", group->id); in iommu_group_alloc()
1017 return group; in iommu_group_alloc()
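/*
 * Illustrative sketch (assumption, not in the original file): a driver's
 * ->device_group() callback that gives every device its own group, much like
 * generic_device_group() further below. The function name is made up.
 */
static struct iommu_group *example_device_group(struct device *dev)
{
	/* One group per device: the strongest possible isolation claim. */
	return iommu_group_alloc();
}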
1022 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
1023 * @group: the group
1025 * iommu drivers can store data in the group for use when doing iommu
1027 * should hold a group reference.
1029 void *iommu_group_get_iommudata(struct iommu_group *group) in iommu_group_get_iommudata() argument
1031 return group->iommu_data; in iommu_group_get_iommudata()
1036 * iommu_group_set_iommudata - set iommu_data for a group
1037 * @group: the group
1041 * iommu drivers can store data in the group for use when doing iommu
1043 * the group has been allocated. Caller should hold a group reference.
1045 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, in iommu_group_set_iommudata() argument
1048 group->iommu_data = iommu_data; in iommu_group_set_iommudata()
1049 group->iommu_data_release = release; in iommu_group_set_iommudata()
1054 * iommu_group_set_name - set name for a group
1055 * @group: the group
1058 * Allow iommu driver to set a name for a group. When set it will
1059 * appear in a name attribute file under the group in sysfs.
1061 int iommu_group_set_name(struct iommu_group *group, const char *name) in iommu_group_set_name() argument
1065 if (group->name) { in iommu_group_set_name()
1066 iommu_group_remove_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1067 kfree(group->name); in iommu_group_set_name()
1068 group->name = NULL; in iommu_group_set_name()
1073 group->name = kstrdup(name, GFP_KERNEL); in iommu_group_set_name()
1074 if (!group->name) in iommu_group_set_name()
1075 return -ENOMEM; in iommu_group_set_name()
1077 ret = iommu_group_create_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1079 kfree(group->name); in iommu_group_set_name()
1080 group->name = NULL; in iommu_group_set_name()
1096 pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; in iommu_create_device_direct_mappings()
1100 return -EINVAL; in iommu_create_device_direct_mappings()
1109 if (entry->type == IOMMU_RESV_DIRECT) in iommu_create_device_direct_mappings()
1110 dev->iommu->require_direct = 1; in iommu_create_device_direct_mappings()
1112 if ((entry->type != IOMMU_RESV_DIRECT && in iommu_create_device_direct_mappings()
1113 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || in iommu_create_device_direct_mappings()
1117 start = ALIGN(entry->start, pg_size); in iommu_create_device_direct_mappings()
1118 end = ALIGN(entry->start + entry->length, pg_size); in iommu_create_device_direct_mappings()
1134 ret = iommu_map(domain, addr - map_size, in iommu_create_device_direct_mappings()
1135 addr - map_size, map_size, in iommu_create_device_direct_mappings()
1136 entry->prot, GFP_KERNEL); in iommu_create_device_direct_mappings()
1155 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, in iommu_group_alloc_device() argument
1163 return ERR_PTR(-ENOMEM); in iommu_group_alloc_device()
1165 device->dev = dev; in iommu_group_alloc_device()
1167 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); in iommu_group_alloc_device()
1171 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); in iommu_group_alloc_device()
1173 if (!device->name) { in iommu_group_alloc_device()
1174 ret = -ENOMEM; in iommu_group_alloc_device()
1178 ret = sysfs_create_link_nowarn(group->devices_kobj, in iommu_group_alloc_device()
1179 &dev->kobj, device->name); in iommu_group_alloc_device()
1181 if (ret == -EEXIST && i >= 0) { in iommu_group_alloc_device()
1186 kfree(device->name); in iommu_group_alloc_device()
1187 device->name = kasprintf(GFP_KERNEL, "%s.%d", in iommu_group_alloc_device()
1188 kobject_name(&dev->kobj), i++); in iommu_group_alloc_device()
1194 trace_add_device_to_group(group->id, dev); in iommu_group_alloc_device()
1196 dev_info(dev, "Adding to iommu group %d\n", group->id); in iommu_group_alloc_device()
1201 kfree(device->name); in iommu_group_alloc_device()
1203 sysfs_remove_link(&dev->kobj, "iommu_group"); in iommu_group_alloc_device()
1206 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); in iommu_group_alloc_device()
1211 * iommu_group_add_device - add a device to an iommu group
1212 * @group: the group into which to add the device (reference should be held)
1216 * group. Adding a device increments the group reference count.
1218 int iommu_group_add_device(struct iommu_group *group, struct device *dev) in iommu_group_add_device() argument
1222 gdev = iommu_group_alloc_device(group, dev); in iommu_group_add_device()
1226 iommu_group_ref_get(group); in iommu_group_add_device()
1227 dev->iommu_group = group; in iommu_group_add_device()
1229 mutex_lock(&group->mutex); in iommu_group_add_device()
1230 list_add_tail(&gdev->list, &group->devices); in iommu_group_add_device()
1231 mutex_unlock(&group->mutex); in iommu_group_add_device()
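/*
 * Illustrative sketch (assumption): adding a device to a group obtained by
 * the caller and then dropping the caller's reference, since
 * iommu_group_add_device() takes its own reference on success.
 */
static int example_add_and_put(struct iommu_group *group, struct device *dev)
{
	int ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);	/* the group now holds its own reference */
	return ret;
}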
1237 * iommu_group_remove_device - remove a device from its current group
1241 * its current group. This decrements the iommu group reference count.
1245 struct iommu_group *group = dev->iommu_group; in iommu_group_remove_device() local
1247 if (!group) in iommu_group_remove_device()
1250 dev_info(dev, "Removing from iommu group %d\n", group->id); in iommu_group_remove_device()
1258 * iommu_group_mutex_assert - Check device group mutex lock
1259 * @dev: the device that has group param set
1262 * group mutex lock for the given device or not.
1264 * Note that this function must be called after device group param is set.
1268 struct iommu_group *group = dev->iommu_group; in iommu_group_mutex_assert() local
1270 lockdep_assert_held(&group->mutex); in iommu_group_mutex_assert()
1275 static struct device *iommu_group_first_dev(struct iommu_group *group) in iommu_group_first_dev() argument
1277 lockdep_assert_held(&group->mutex); in iommu_group_first_dev()
1278 return list_first_entry(&group->devices, struct group_device, list)->dev; in iommu_group_first_dev()
1282 * iommu_group_for_each_dev - iterate over each device in the group
1283 * @group: the group
1287 * This function is called by group users to iterate over group devices.
1288 * Callers should hold a reference count to the group during callback.
1289 * The group->mutex is held across callbacks, which will block calls to
1292 int iommu_group_for_each_dev(struct iommu_group *group, void *data, in iommu_group_for_each_dev() argument
1298 mutex_lock(&group->mutex); in iommu_group_for_each_dev()
1299 for_each_group_device(group, device) { in iommu_group_for_each_dev()
1300 ret = fn(device->dev, data); in iommu_group_for_each_dev()
1304 mutex_unlock(&group->mutex); in iommu_group_for_each_dev()
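/*
 * Illustrative sketch (assumption): counting the devices in a group with
 * iommu_group_for_each_dev(). The callback and helper names are hypothetical.
 */
static int example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static int example_count_group_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);
	return count;
}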
1311 * iommu_group_get - Return the group for a device and increment reference
1312 * @dev: get the group that this device belongs to
1314 * This function is called by iommu drivers and users to get the group
1315 * for the specified device. If found, the group is returned and the group
1320 struct iommu_group *group = dev->iommu_group; in iommu_group_get() local
1322 if (group) in iommu_group_get()
1323 kobject_get(group->devices_kobj); in iommu_group_get()
1325 return group; in iommu_group_get()
1330 * iommu_group_ref_get - Increment reference on a group
1331 * @group: the group to use, must not be NULL
1334 * existing group. Returns the given group for convenience.
1336 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) in iommu_group_ref_get() argument
1338 kobject_get(group->devices_kobj); in iommu_group_ref_get()
1339 return group; in iommu_group_ref_get()
1344 * iommu_group_put - Decrement group reference
1345 * @group: the group to use
1348 * iommu group. Once the reference count is zero, the group is released.
1350 void iommu_group_put(struct iommu_group *group) in iommu_group_put() argument
1352 if (group) in iommu_group_put()
1353 kobject_put(group->devices_kobj); in iommu_group_put()
1358 * iommu_group_id - Return ID for a group
1359 * @group: the group to ID
1361 * Return the unique ID for the group matching the sysfs group number.
1363 int iommu_group_id(struct iommu_group *group) in iommu_group_id() argument
1365 return group->id; in iommu_group_id()
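/*
 * Illustrative sketch (assumption): taking a temporary reference on a
 * device's group to query its ID, then releasing it again.
 */
static int example_group_id_of(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int id;

	if (!group)
		return -ENODEV;	/* device is not in an IOMMU group */

	id = iommu_group_id(group);
	iommu_group_put(group);	/* drop the reference from iommu_group_get() */
	return id;
}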
1384 * all the other non-isolated functions and look for existing groups. For
1386 * that may already have a group.
1392 struct iommu_group *group; in get_pci_function_alias_group() local
1394 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) in get_pci_function_alias_group()
1398 if (tmp == pdev || tmp->bus != pdev->bus || in get_pci_function_alias_group()
1399 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || in get_pci_function_alias_group()
1403 group = get_pci_alias_group(tmp, devfns); in get_pci_function_alias_group()
1404 if (group) { in get_pci_function_alias_group()
1406 return group; in get_pci_function_alias_group()
1418 * downstream switch ports). It's conceivable though that a pair of
1426 struct iommu_group *group; in get_pci_alias_group() local
1428 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) in get_pci_alias_group()
1431 group = iommu_group_get(&pdev->dev); in get_pci_alias_group()
1432 if (group) in get_pci_alias_group()
1433 return group; in get_pci_alias_group()
1436 if (tmp == pdev || tmp->bus != pdev->bus) in get_pci_alias_group()
1441 group = get_pci_alias_group(tmp, devfns); in get_pci_alias_group()
1442 if (group) { in get_pci_alias_group()
1444 return group; in get_pci_alias_group()
1447 group = get_pci_function_alias_group(tmp, devfns); in get_pci_alias_group()
1448 if (group) { in get_pci_alias_group()
1450 return group; in get_pci_alias_group()
1460 struct iommu_group *group; member
1465 * the IOMMU group if we find one along the way.
1471 data->pdev = pdev; in get_pci_alias_or_group()
1472 data->group = iommu_group_get(&pdev->dev); in get_pci_alias_or_group()
1474 return data->group != NULL; in get_pci_alias_or_group()
1478 * Generic device_group call-back function. It just allocates one
1479 * iommu-group per device.
1488 * Generic device_group call-back function. It just allocates one
1489 * iommu-group per iommu driver instance shared by every device
1494 struct iommu_device *iommu = dev->iommu->iommu_dev; in generic_single_device_group()
1496 if (!iommu->singleton_group) { in generic_single_device_group()
1497 struct iommu_group *group; in generic_single_device_group() local
1499 group = iommu_group_alloc(); in generic_single_device_group()
1500 if (IS_ERR(group)) in generic_single_device_group()
1501 return group; in generic_single_device_group()
1502 iommu->singleton_group = group; in generic_single_device_group()
1504 return iommu_group_ref_get(iommu->singleton_group); in generic_single_device_group()
1510 * to find or create an IOMMU group for a device.
1517 struct iommu_group *group = NULL; in pci_device_group() local
1521 return ERR_PTR(-EINVAL); in pci_device_group()
1525 * be aliased due to topology in order to have its own IOMMU group. in pci_device_group()
1527 * group, use it. in pci_device_group()
1530 return data.group; in pci_device_group()
1537 * peer-to-peer DMA by PCI ACS. Again, if we find an existing in pci_device_group()
1538 * group, use it. in pci_device_group()
1540 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { in pci_device_group()
1541 if (!bus->self) in pci_device_group()
1544 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) in pci_device_group()
1547 pdev = bus->self; in pci_device_group()
1549 group = iommu_group_get(&pdev->dev); in pci_device_group()
1550 if (group) in pci_device_group()
1551 return group; in pci_device_group()
1556 * device or another device aliases us, use the same group. in pci_device_group()
1558 group = get_pci_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1559 if (group) in pci_device_group()
1560 return group; in pci_device_group()
1563 * Look for existing groups on non-isolated functions on the same in pci_device_group()
1567 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1568 if (group) in pci_device_group()
1569 return group; in pci_device_group()
1571 /* No shared group found, allocate new */ in pci_device_group()
1576 /* Get the IOMMU group for device on fsl-mc bus */
1580 struct iommu_group *group; in fsl_mc_device_group() local
1582 group = iommu_group_get(cont_dev); in fsl_mc_device_group()
1583 if (!group) in fsl_mc_device_group()
1584 group = iommu_group_alloc(); in fsl_mc_device_group()
1585 return group; in fsl_mc_device_group()
1590 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in __iommu_group_alloc_default_domain() argument
1592 if (group->default_domain && group->default_domain->type == req_type) in __iommu_group_alloc_default_domain()
1593 return group->default_domain; in __iommu_group_alloc_default_domain()
1594 return __iommu_group_domain_alloc(group, req_type); in __iommu_group_alloc_default_domain()
1602 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in iommu_group_alloc_default_domain() argument
1604 const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); in iommu_group_alloc_default_domain()
1607 lockdep_assert_held(&group->mutex); in iommu_group_alloc_default_domain()
1614 if (ops->default_domain) { in iommu_group_alloc_default_domain()
1615 if (req_type != ops->default_domain->type) in iommu_group_alloc_default_domain()
1616 return ERR_PTR(-EINVAL); in iommu_group_alloc_default_domain()
1617 return ops->default_domain; in iommu_group_alloc_default_domain()
1621 return __iommu_group_alloc_default_domain(group, req_type); in iommu_group_alloc_default_domain()
1624 dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); in iommu_group_alloc_default_domain()
1630 return ERR_PTR(-EINVAL); in iommu_group_alloc_default_domain()
1631 dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); in iommu_group_alloc_default_domain()
1635 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", in iommu_group_alloc_default_domain()
1636 iommu_def_domain_type, group->name); in iommu_group_alloc_default_domain()
1640 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) in iommu_group_default_domain() argument
1642 return group->default_domain; in iommu_group_default_domain()
1653 if (ret == -ENODEV) in probe_iommu_group()
1679 * group. Drivers must give a consistent result.
1681 static int iommu_get_def_domain_type(struct iommu_group *group, in iommu_get_def_domain_type() argument
1687 if (ops->default_domain) { in iommu_get_def_domain_type()
1692 type = ops->default_domain->type; in iommu_get_def_domain_type()
1694 if (ops->def_domain_type) in iommu_get_def_domain_type()
1695 type = ops->def_domain_type(dev); in iommu_get_def_domain_type()
1706 …"IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n… in iommu_get_def_domain_type()
1708 group->id); in iommu_get_def_domain_type()
1723 static int iommu_get_default_domain_type(struct iommu_group *group, in iommu_get_default_domain_type() argument
1730 lockdep_assert_held(&group->mutex); in iommu_get_default_domain_type()
1744 for_each_group_device(group, gdev) { in iommu_get_default_domain_type()
1745 driver_type = iommu_get_def_domain_type(group, gdev->dev, in iommu_get_default_domain_type()
1748 if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) { in iommu_get_default_domain_type()
1754 return -1; in iommu_get_default_domain_type()
1755 untrusted = gdev->dev; in iommu_get_default_domain_type()
1766 return -1; in iommu_get_default_domain_type()
1775 "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n", in iommu_get_default_domain_type()
1776 group->id, iommu_domain_type_str(driver_type)); in iommu_get_default_domain_type()
1777 return -1; in iommu_get_default_domain_type()
1784 return -1; in iommu_get_default_domain_type()
1794 if (ops->probe_finalize) in iommu_group_do_probe_finalize()
1795 ops->probe_finalize(dev); in iommu_group_do_probe_finalize()
1800 struct iommu_group *group, *next; in bus_iommu_probe() local
1808 list_for_each_entry_safe(group, next, &group_list, entry) { in bus_iommu_probe()
1811 mutex_lock(&group->mutex); in bus_iommu_probe()
1814 list_del_init(&group->entry); in bus_iommu_probe()
1818 * that the cross-group default domain type and the setup of the in bus_iommu_probe()
1819 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios. in bus_iommu_probe()
1821 ret = iommu_setup_default_domain(group, 0); in bus_iommu_probe()
1823 mutex_unlock(&group->mutex); in bus_iommu_probe()
1826 for_each_group_device(group, gdev) in bus_iommu_probe()
1827 iommu_setup_dma_ops(gdev->dev); in bus_iommu_probe()
1828 mutex_unlock(&group->mutex); in bus_iommu_probe()
1831 * FIXME: Mis-locked because the ops->probe_finalize() call-back in bus_iommu_probe()
1833 * in-turn might call back into IOMMU core code, where it tries in bus_iommu_probe()
1834 * to take group->mutex, resulting in a deadlock. in bus_iommu_probe()
1836 for_each_group_device(group, gdev) in bus_iommu_probe()
1837 iommu_group_do_probe_finalize(gdev->dev); in bus_iommu_probe()
1844 * iommu_present() - make platform-specific assumptions about an IOMMU
1869 * device_iommu_capable() - check for a general IOMMU capability
1884 if (!ops->capable) in device_iommu_capable()
1887 return ops->capable(dev, cap); in device_iommu_capable()
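/*
 * Illustrative sketch (assumption): probing a per-device IOMMU capability
 * before deciding on a DMA strategy.
 */
static bool example_supports_coherent_dma(struct device *dev)
{
	return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}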
1892 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
1893 * for a group
1894 * @group: Group to query
1897 * msi_device_has_isolated_msi() for devices in a group. However nothing
1901 bool iommu_group_has_isolated_msi(struct iommu_group *group) in iommu_group_has_isolated_msi() argument
1906 mutex_lock(&group->mutex); in iommu_group_has_isolated_msi()
1907 for_each_group_device(group, group_dev) in iommu_group_has_isolated_msi()
1908 ret &= msi_device_has_isolated_msi(group_dev->dev); in iommu_group_has_isolated_msi()
1909 mutex_unlock(&group->mutex); in iommu_group_has_isolated_msi()
1915 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1932 domain->handler = handler; in iommu_set_fault_handler()
1933 domain->handler_token = token; in iommu_set_fault_handler()
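/*
 * Illustrative sketch (assumption): a fault handler a domain owner might
 * install with iommu_set_fault_handler(). It only logs the fault and returns
 * -ENOSYS so the driver's default reporting still applies. Installed with:
 *
 *	iommu_set_fault_handler(domain, example_fault_handler, NULL);
 */
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_warn_ratelimited(dev, "example: fault at iova 0x%lx, flags 0x%x\n",
			     iova, flags);
	return -ENOSYS;
}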
1944 if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) in __iommu_domain_alloc()
1945 return ops->identity_domain; in __iommu_domain_alloc()
1946 else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain) in __iommu_domain_alloc()
1947 return ops->blocked_domain; in __iommu_domain_alloc()
1948 else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) in __iommu_domain_alloc()
1949 domain = ops->domain_alloc_paging(dev); in __iommu_domain_alloc()
1950 else if (ops->domain_alloc) in __iommu_domain_alloc()
1951 domain = ops->domain_alloc(alloc_type); in __iommu_domain_alloc()
1953 return ERR_PTR(-EOPNOTSUPP); in __iommu_domain_alloc()
1963 return ERR_PTR(-ENOMEM); in __iommu_domain_alloc()
1965 domain->type = type; in __iommu_domain_alloc()
1966 domain->owner = ops; in __iommu_domain_alloc()
1971 if (!domain->pgsize_bitmap) in __iommu_domain_alloc()
1972 domain->pgsize_bitmap = ops->pgsize_bitmap; in __iommu_domain_alloc()
1974 if (!domain->ops) in __iommu_domain_alloc()
1975 domain->ops = ops->default_domain_ops; in __iommu_domain_alloc()
1990 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) in __iommu_group_domain_alloc() argument
1992 struct device *dev = iommu_group_first_dev(group); in __iommu_group_domain_alloc()
2007 return -EBUSY; in __iommu_domain_alloc_dev()
2034 * iommu_paging_domain_alloc() - Allocate a paging domain
2043 return ERR_PTR(-ENODEV); in iommu_paging_domain_alloc()
2051 if (domain->type == IOMMU_DOMAIN_SVA) in iommu_domain_free()
2052 mmdrop(domain->mm); in iommu_domain_free()
2054 if (domain->ops->free) in iommu_domain_free()
2055 domain->ops->free(domain); in iommu_domain_free()
2060 * Put the group's domain back to the appropriate core-owned domain - either the
2061 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
2063 static void __iommu_group_set_core_domain(struct iommu_group *group) in __iommu_group_set_core_domain() argument
2067 if (group->owner) in __iommu_group_set_core_domain()
2068 new_domain = group->blocking_domain; in __iommu_group_set_core_domain()
2070 new_domain = group->default_domain; in __iommu_group_set_core_domain()
2072 __iommu_group_set_domain_nofail(group, new_domain); in __iommu_group_set_core_domain()
2080 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
2081 return -ENODEV; in __iommu_attach_device()
2083 ret = domain->ops->attach_dev(domain, dev); in __iommu_attach_device()
2086 dev->iommu->attach_deferred = 0; in __iommu_attach_device()
2092 * iommu_attach_device - Attach an IOMMU domain to a device
2106 struct iommu_group *group = dev->iommu_group; in iommu_attach_device() local
2109 if (!group) in iommu_attach_device()
2110 return -ENODEV; in iommu_attach_device()
2113 * Lock the group to make sure the device-count doesn't in iommu_attach_device()
2116 mutex_lock(&group->mutex); in iommu_attach_device()
2117 ret = -EINVAL; in iommu_attach_device()
2118 if (list_count_nodes(&group->devices) != 1) in iommu_attach_device()
2121 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2124 mutex_unlock(&group->mutex); in iommu_attach_device()
2131 if (dev->iommu && dev->iommu->attach_deferred) in iommu_deferred_attach()
2140 struct iommu_group *group = dev->iommu_group; in iommu_detach_device() local
2142 if (!group) in iommu_detach_device()
2145 mutex_lock(&group->mutex); in iommu_detach_device()
2146 if (WARN_ON(domain != group->domain) || in iommu_detach_device()
2147 WARN_ON(list_count_nodes(&group->devices) != 1)) in iommu_detach_device()
2149 __iommu_group_set_core_domain(group); in iommu_detach_device()
2152 mutex_unlock(&group->mutex); in iommu_detach_device()
2159 struct iommu_group *group = dev->iommu_group; in iommu_get_domain_for_dev() local
2161 if (!group) in iommu_get_domain_for_dev()
2164 return group->domain; in iommu_get_domain_for_dev()
2170 * guarantees that the group and its default domain are valid and correct.
2174 return dev->iommu_group->default_domain; in iommu_get_dma_domain()
2178 struct iommu_group *group) in __iommu_attach_group() argument
2182 if (group->domain && group->domain != group->default_domain && in __iommu_attach_group()
2183 group->domain != group->blocking_domain) in __iommu_attach_group()
2184 return -EBUSY; in __iommu_attach_group()
2186 dev = iommu_group_first_dev(group); in __iommu_attach_group()
2187 if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner) in __iommu_attach_group()
2188 return -EINVAL; in __iommu_attach_group()
2190 return __iommu_group_set_domain(group, domain); in __iommu_attach_group()
2194 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2196 * @group: IOMMU group that will be attached
2202 * the group. In this case attaching a different domain to the
2203 * group may succeed.
2205 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2209 mutex_lock(&group->mutex); in iommu_attach_group()
2210 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2211 mutex_unlock(&group->mutex); in iommu_attach_group()
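/*
 * Illustrative sketch (assumption): an external user (e.g. a VFIO-like driver
 * that has already claimed DMA ownership) allocating a paging domain and
 * attaching it to a whole group.
 */
static struct iommu_domain *example_attach_new_domain(struct device *dev,
						       struct iommu_group *group)
{
	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
	int ret;

	if (IS_ERR(domain))
		return domain;

	ret = iommu_attach_group(domain, group);
	if (ret) {
		iommu_domain_free(domain);
		return ERR_PTR(ret);
	}
	return domain;
}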
2218 * iommu_group_replace_domain - replace the domain that a group is attached to
2220 * @group: IOMMU group that will be attached to the new domain
2222 * This API allows the group to switch domains without being forced to go to
2223 * the blocking domain in-between.
2228 int iommu_group_replace_domain(struct iommu_group *group, in iommu_group_replace_domain() argument
2234 return -EINVAL; in iommu_group_replace_domain()
2236 mutex_lock(&group->mutex); in iommu_group_replace_domain()
2237 ret = __iommu_group_set_domain(group, new_domain); in iommu_group_replace_domain()
2238 mutex_unlock(&group->mutex); in iommu_group_replace_domain()
2243 static int __iommu_device_set_domain(struct iommu_group *group, in __iommu_device_set_domain() argument
2254 * being used with iommu_group_claim_dma_owner() which will block in __iommu_device_set_domain()
2257 if (dev->iommu->require_direct && in __iommu_device_set_domain()
2258 (new_domain->type == IOMMU_DOMAIN_BLOCKED || in __iommu_device_set_domain()
2259 new_domain == group->blocking_domain)) { in __iommu_device_set_domain()
2262 return -EINVAL; in __iommu_device_set_domain()
2265 if (dev->iommu->attach_deferred) { in __iommu_device_set_domain()
2266 if (new_domain == group->default_domain) in __iommu_device_set_domain()
2268 dev->iommu->attach_deferred = 0; in __iommu_device_set_domain()
2279 group->blocking_domain && in __iommu_device_set_domain()
2280 group->blocking_domain != new_domain) in __iommu_device_set_domain()
2281 __iommu_attach_device(group->blocking_domain, dev); in __iommu_device_set_domain()
2288 * If 0 is returned the group's domain is new_domain. If an error is returned
2289 * then the group's domain will be set back to the existing domain unless
2290 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
2296 * devices in a group. Ideally we'd have a single device which represents the
2297 * requestor ID of the group, but we also allow IOMMU drivers to create policy
2299 * members, but we wish to group them at a higher level (ex. untrusted
2300 * multi-function PCI devices). Thus we attach each device.
2302 static int __iommu_group_set_domain_internal(struct iommu_group *group, in __iommu_group_set_domain_internal() argument
2311 lockdep_assert_held(&group->mutex); in __iommu_group_set_domain_internal()
2313 if (group->domain == new_domain) in __iommu_group_set_domain_internal()
2317 return -EINVAL; in __iommu_group_set_domain_internal()
2323 * either new_domain or group->domain, never something else. in __iommu_group_set_domain_internal()
2326 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2327 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, in __iommu_group_set_domain_internal()
2332 * Keep trying the other devices in the group. If a in __iommu_group_set_domain_internal()
2343 group->domain = new_domain; in __iommu_group_set_domain_internal()
2352 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2355 * we leave group->domain as NULL and let release clean in __iommu_group_set_domain_internal()
2358 if (group->domain) in __iommu_group_set_domain_internal()
2360 group, gdev->dev, group->domain, in __iommu_group_set_domain_internal()
2368 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2370 mutex_lock(&group->mutex); in iommu_detach_group()
2371 __iommu_group_set_core_domain(group); in iommu_detach_group()
2372 mutex_unlock(&group->mutex); in iommu_detach_group()
2378 if (domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_iova_to_phys()
2381 if (domain->type == IOMMU_DOMAIN_BLOCKED) in iommu_iova_to_phys()
2384 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
2389 phys_addr_t paddr, size_t size, size_t *count) in iommu_pgsize() argument
2397 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); in iommu_pgsize()
2409 if (!count) in iommu_pgsize()
2413 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in iommu_pgsize()
2424 if ((iova ^ paddr) & (pgsize_next - 1)) in iommu_pgsize()
2428 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); in iommu_pgsize()
2438 *count = size >> pgsize_idx; in iommu_pgsize()
2445 const struct iommu_domain_ops *ops = domain->ops; in __iommu_map()
2452 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_map()
2453 return -EINVAL; in __iommu_map()
2455 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) in __iommu_map()
2456 return -ENODEV; in __iommu_map()
2459 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_map()
2469 return -EINVAL; in __iommu_map()
2475 size_t pgsize, count, mapped = 0; in __iommu_map() local
2477 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); in __iommu_map()
2479 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", in __iommu_map()
2480 iova, &paddr, pgsize, count); in __iommu_map()
2481 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, in __iommu_map()
2487 size -= mapped; in __iommu_map()
2498 iommu_unmap(domain, orig_iova, orig_size - size); in __iommu_map()
2508 const struct iommu_domain_ops *ops = domain->ops; in iommu_map()
2516 return -EINVAL; in iommu_map()
2519 if (ret == 0 && ops->iotlb_sync_map) { in iommu_map()
2520 ret = ops->iotlb_sync_map(domain, iova, size); in iommu_map()
2539 const struct iommu_domain_ops *ops = domain->ops; in __iommu_unmap()
2544 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_unmap()
2547 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
2551 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_unmap()
2571 size_t pgsize, count; in __iommu_unmap() local
2573 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); in __iommu_unmap()
2574 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); in __iommu_unmap()
2615 const struct iommu_domain_ops *ops = domain->ops; in iommu_map_sg()
2626 return -EINVAL; in iommu_map_sg()
2646 len += sg->length; in iommu_map_sg()
2648 len = sg->length; in iommu_map_sg()
2657 if (ops->iotlb_sync_map) { in iommu_map_sg()
2658 ret = ops->iotlb_sync_map(domain, iova, mapped); in iommu_map_sg()
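/*
 * Illustrative sketch (assumption): mapping a single page into an unmanaged
 * domain, using it, and unmapping it again. The IOVA and physical address are
 * caller-chosen placeholders.
 */
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	int ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			    IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

	if (ret)
		return ret;

	/* ... device DMA to/from the mapping happens here ... */

	iommu_unmap(domain, iova, PAGE_SIZE);
	return 0;
}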
2673 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2679 * This function should be called by the low-level IOMMU implementations
2680 * whenever IOMMU faults happen, to allow high-level users, that are
2684 * - mere logging of the event
2685 * - dynamic TLB/PTE loading
2686 * - if restarting of the faulting device is required
2692 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2693 * (though fault handlers can also return -ENOSYS, in case they want to
2699 int ret = -ENOSYS; in report_iommu_fault()
2705 if (domain->handler) in report_iommu_fault()
2706 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
2707 domain->handler_token); in report_iommu_fault()
2728 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_enable_nesting()
2729 return -EINVAL; in iommu_enable_nesting()
2730 if (!domain->ops->enable_nesting) in iommu_enable_nesting()
2731 return -EINVAL; in iommu_enable_nesting()
2732 return domain->ops->enable_nesting(domain); in iommu_enable_nesting()
2739 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_set_pgtable_quirks()
2740 return -EINVAL; in iommu_set_pgtable_quirks()
2741 if (!domain->ops->set_pgtable_quirks) in iommu_set_pgtable_quirks()
2742 return -EINVAL; in iommu_set_pgtable_quirks()
2743 return domain->ops->set_pgtable_quirks(domain, quirk); in iommu_set_pgtable_quirks()
2748 * iommu_get_resv_regions - get reserved regions
2759 if (ops->get_resv_regions) in iommu_get_resv_regions()
2760 ops->get_resv_regions(dev, list); in iommu_get_resv_regions()
2765 * iommu_put_resv_regions - release reserved regions
2776 if (entry->free) in iommu_put_resv_regions()
2777 entry->free(dev, entry); in iommu_put_resv_regions()
2795 INIT_LIST_HEAD(&region->list); in iommu_alloc_resv_region()
2796 region->start = start; in iommu_alloc_resv_region()
2797 region->length = length; in iommu_alloc_resv_region()
2798 region->prot = prot; in iommu_alloc_resv_region()
2799 region->type = type; in iommu_alloc_resv_region()
2831 if (iommu->fwnode == fwnode) { in iommu_ops_from_fwnode()
2832 ops = iommu->ops; in iommu_ops_from_fwnode()
2845 return -EPROBE_DEFER; in iommu_fwspec_init()
2848 return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL; in iommu_fwspec_init()
2851 return -ENOMEM; in iommu_fwspec_init()
2856 return -ENOMEM; in iommu_fwspec_init()
2859 fwspec->iommu_fwnode = iommu_fwnode; in iommu_fwspec_init()
2870 fwnode_handle_put(fwspec->iommu_fwnode); in iommu_fwspec_free()
2883 return -EINVAL; in iommu_fwspec_add_ids()
2885 new_num = fwspec->num_ids + num_ids; in iommu_fwspec_add_ids()
2890 return -ENOMEM; in iommu_fwspec_add_ids()
2896 fwspec->ids[fwspec->num_ids + i] = ids[i]; in iommu_fwspec_add_ids()
2898 fwspec->num_ids = new_num; in iommu_fwspec_add_ids()
2911 if (ops->dev_enable_feat) in iommu_dev_enable_feature()
2912 return ops->dev_enable_feat(dev, feat); in iommu_dev_enable_feature()
2915 return -ENODEV; in iommu_dev_enable_feature()
2927 if (ops->dev_disable_feat) in iommu_dev_disable_feature()
2928 return ops->dev_disable_feat(dev, feat); in iommu_dev_disable_feature()
2931 return -EBUSY; in iommu_dev_disable_feature()
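/*
 * Illustrative sketch (assumption): enabling an optional per-device feature
 * such as Shared Virtual Addressing around a period of use.
 */
static int example_with_sva(struct device *dev)
{
	int ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);

	if (ret)
		return ret;	/* SVA not supported or could not be enabled */

	/* ... bind mm(s), run the workload ... */

	return iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}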
2936 * iommu_setup_default_domain - Set the default_domain for the group
2937 * @group: Group to change
2940 * Allocate a default domain and set it as the current domain on the group. If
2941 * the group already has a default domain it will be changed to the target_type.
2945 static int iommu_setup_default_domain(struct iommu_group *group, in iommu_setup_default_domain() argument
2948 struct iommu_domain *old_dom = group->default_domain; in iommu_setup_default_domain()
2955 lockdep_assert_held(&group->mutex); in iommu_setup_default_domain()
2957 req_type = iommu_get_default_domain_type(group, target_type); in iommu_setup_default_domain()
2959 return -EINVAL; in iommu_setup_default_domain()
2961 dom = iommu_group_alloc_default_domain(group, req_type); in iommu_setup_default_domain()
2965 if (group->default_domain == dom) in iommu_setup_default_domain()
2974 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
2975 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { in iommu_setup_default_domain()
2978 gdev->dev->iommu->iommu_dev->dev, in iommu_setup_default_domain()
2984 group->default_domain = dom; in iommu_setup_default_domain()
2985 if (!group->domain) { in iommu_setup_default_domain()
2989 * iommu driver and call ops->release_device. Put the domain in iommu_setup_default_domain()
2990 * in group->default_domain so it is freed after. in iommu_setup_default_domain()
2993 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
2997 ret = __iommu_group_set_domain(group, dom); in iommu_setup_default_domain()
3009 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
3010 ret = iommu_create_device_direct_mappings(dom, gdev->dev); in iommu_setup_default_domain()
3024 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
3028 group->default_domain = old_dom; in iommu_setup_default_domain()
3035 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3039 * group->mutex is used here to guarantee that the device release path
3042 static ssize_t iommu_group_store_type(struct iommu_group *group, in iommu_group_store_type() argument
3043 const char *buf, size_t count) in iommu_group_store_type() argument
3049 return -EACCES; in iommu_group_store_type()
3051 if (WARN_ON(!group) || !group->default_domain) in iommu_group_store_type()
3052 return -EINVAL; in iommu_group_store_type()
3058 else if (sysfs_streq(buf, "DMA-FQ")) in iommu_group_store_type()
3063 return -EINVAL; in iommu_group_store_type()
3065 mutex_lock(&group->mutex); in iommu_group_store_type()
3068 group->default_domain->type == IOMMU_DOMAIN_DMA) { in iommu_group_store_type()
3069 ret = iommu_dma_init_fq(group->default_domain); in iommu_group_store_type()
3073 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; in iommu_group_store_type()
3074 ret = count; in iommu_group_store_type()
3079 if (list_empty(&group->devices) || group->owner_cnt) { in iommu_group_store_type()
3080 ret = -EPERM; in iommu_group_store_type()
3084 ret = iommu_setup_default_domain(group, req_type); in iommu_group_store_type()
3089 for_each_group_device(group, gdev) in iommu_group_store_type()
3090 iommu_setup_dma_ops(gdev->dev); in iommu_group_store_type()
3093 mutex_unlock(&group->mutex); in iommu_group_store_type()
3094 return ret ?: count; in iommu_group_store_type()
3098 * iommu_device_use_default_domain() - Device driver wants to handle device
3107 /* Caller is the driver core during the pre-probe path */ in iommu_device_use_default_domain()
3108 struct iommu_group *group = dev->iommu_group; in iommu_device_use_default_domain() local
3111 if (!group) in iommu_device_use_default_domain()
3114 mutex_lock(&group->mutex); in iommu_device_use_default_domain()
3115 if (group->owner_cnt) { in iommu_device_use_default_domain()
3116 if (group->domain != group->default_domain || group->owner || in iommu_device_use_default_domain()
3117 !xa_empty(&group->pasid_array)) { in iommu_device_use_default_domain()
3118 ret = -EBUSY; in iommu_device_use_default_domain()
3123 group->owner_cnt++; in iommu_device_use_default_domain()
3126 mutex_unlock(&group->mutex); in iommu_device_use_default_domain()
3131 * iommu_device_unuse_default_domain() - Device driver stops handling device
3140 /* Caller is the driver core during the post-probe path */ in iommu_device_unuse_default_domain()
3141 struct iommu_group *group = dev->iommu_group; in iommu_device_unuse_default_domain() local
3143 if (!group) in iommu_device_unuse_default_domain()
3146 mutex_lock(&group->mutex); in iommu_device_unuse_default_domain()
3147 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) in iommu_device_unuse_default_domain()
3148 group->owner_cnt--; in iommu_device_unuse_default_domain()
3150 mutex_unlock(&group->mutex); in iommu_device_unuse_default_domain()
3153 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) in __iommu_group_alloc_blocking_domain() argument
3157 if (group->blocking_domain) in __iommu_group_alloc_blocking_domain()
3160 domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); in __iommu_group_alloc_blocking_domain()
3166 domain = __iommu_group_domain_alloc(group, in __iommu_group_alloc_blocking_domain()
3171 group->blocking_domain = domain; in __iommu_group_alloc_blocking_domain()
3175 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) in __iommu_take_dma_ownership() argument
3179 if ((group->domain && group->domain != group->default_domain) || in __iommu_take_dma_ownership()
3180 !xa_empty(&group->pasid_array)) in __iommu_take_dma_ownership()
3181 return -EBUSY; in __iommu_take_dma_ownership()
3183 ret = __iommu_group_alloc_blocking_domain(group); in __iommu_take_dma_ownership()
3186 ret = __iommu_group_set_domain(group, group->blocking_domain); in __iommu_take_dma_ownership()
3190 group->owner = owner; in __iommu_take_dma_ownership()
3191 group->owner_cnt++; in __iommu_take_dma_ownership()
3196 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3197 * @group: The group.
3202 * prohibited. Only a single owner may exist for a group.
3204 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) in iommu_group_claim_dma_owner() argument
3209 return -EINVAL; in iommu_group_claim_dma_owner()
3211 mutex_lock(&group->mutex); in iommu_group_claim_dma_owner()
3212 if (group->owner_cnt) { in iommu_group_claim_dma_owner()
3213 ret = -EPERM; in iommu_group_claim_dma_owner()
3217 ret = __iommu_take_dma_ownership(group, owner); in iommu_group_claim_dma_owner()
3219 mutex_unlock(&group->mutex); in iommu_group_claim_dma_owner()
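/*
 * Illustrative sketch (assumption): claiming exclusive DMA ownership of a
 * group before handing it to user space, then releasing it. The owner cookie
 * only needs to be a unique pointer.
 */
static int example_claim_group(struct iommu_group *group, void *owner_cookie)
{
	int ret = iommu_group_claim_dma_owner(group, owner_cookie);

	if (ret)
		return ret;	/* some other entity already owns the group */

	/* ... attach a user-controlled domain and run the workload ... */

	iommu_group_release_dma_owner(group);
	return 0;
}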
3226 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
3230 * Claim the DMA ownership of a device. Multiple devices in the same group may
3237 struct iommu_group *group = dev->iommu_group; in iommu_device_claim_dma_owner() local
3241 return -EINVAL; in iommu_device_claim_dma_owner()
3243 if (!group) in iommu_device_claim_dma_owner()
3244 return -ENODEV; in iommu_device_claim_dma_owner()
3246 mutex_lock(&group->mutex); in iommu_device_claim_dma_owner()
3247 if (group->owner_cnt) { in iommu_device_claim_dma_owner()
3248 if (group->owner != owner) { in iommu_device_claim_dma_owner()
3249 ret = -EPERM; in iommu_device_claim_dma_owner()
3252 group->owner_cnt++; in iommu_device_claim_dma_owner()
3256 ret = __iommu_take_dma_ownership(group, owner); in iommu_device_claim_dma_owner()
3258 mutex_unlock(&group->mutex); in iommu_device_claim_dma_owner()

static void __iommu_release_dma_ownership(struct iommu_group *group)
{
	if (WARN_ON(!group->owner_cnt || !group->owner ||
		    !xa_empty(&group->pasid_array)))
		return;

	group->owner_cnt = 0;
	group->owner = NULL;
	__iommu_group_set_domain_nofail(group, group->default_domain);
}

/**
 * iommu_group_release_dma_owner() - Release DMA ownership of a group
 * @group: The group
 *
 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
 */
void iommu_group_release_dma_owner(struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
 * iommu_device_release_dma_owner() - Release DMA ownership of a device
 * @dev: The device
 *
 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
 */
void iommu_device_release_dma_owner(struct device *dev)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;

	mutex_lock(&group->mutex);
	if (group->owner_cnt > 1)
		group->owner_cnt--;
	else
		__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
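
/*
 * Editor's illustrative sketch, not part of iommu.c: the per-device variant.
 * Several drivers bound to devices of the same group may all claim ownership
 * as long as they pass the same owner cookie; a different cookie is refused
 * with -EPERM. The example_* names are hypothetical.
 */
#if 0	/* illustration only */
static int example_owner_cookie;

static int example_driver_probe(struct device *dev)
{
	int ret;

	ret = iommu_device_claim_dma_owner(dev, &example_owner_cookie);
	if (ret)
		return ret;
	/* ... set up user-controlled DMA for this device ... */
	return 0;
}

static void example_driver_remove(struct device *dev)
{
	/* Drops the refcount; the last release restores the default domain */
	iommu_device_release_dma_owner(dev);
}
#endif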

/**
 * iommu_group_dma_owner_claimed() - Query group dma ownership status
 * @group: The group.
 *
 * This provides status query on a given group. It is racy and only for
 * non-binding status reporting.
 */
bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	unsigned int user;

	mutex_lock(&group->mutex);
	user = group->owner_cnt;
	mutex_unlock(&group->mutex);

	return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);

static int __iommu_set_group_pasid(struct iommu_domain *domain,
				   struct iommu_group *group, ioasid_t pasid)
{
	struct group_device *device, *last_gdev;
	int ret;

	for_each_group_device(group, device) {
		ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
		if (ret)
			goto err_revert;
	}

	return 0;

err_revert:
	/* Undo the attachments that already succeeded, stopping at the failure */
	last_gdev = device;
	for_each_group_device(group, device) {
		const struct iommu_ops *ops = dev_iommu_ops(device->dev);

		if (device == last_gdev)
			break;
		ops->remove_dev_pasid(device->dev, pasid, domain);
	}
	return ret;
}

static void __iommu_remove_group_pasid(struct iommu_group *group,
				       ioasid_t pasid,
				       struct iommu_domain *domain)
{
	struct group_device *device;
	const struct iommu_ops *ops;

	for_each_group_device(group, device) {
		ops = dev_iommu_ops(device->dev);
		ops->remove_dev_pasid(device->dev, pasid, domain);
	}
}

/**
 * iommu_attach_device_pasid() - Attach a domain to pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 * @handle: the attach handle.
 *
 * Return: 0 on success, or an error.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	int ret;

	if (!domain->ops->set_dev_pasid)
		return -EOPNOTSUPP;

	if (!group)
		return -ENODEV;

	if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
	    pasid == IOMMU_NO_PASID)
		return -EINVAL;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		if (pasid >= device->dev->iommu->max_pasids) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (handle)
		handle->domain = domain;

	ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
	if (ret)
		goto out_unlock;

	ret = __iommu_set_group_pasid(domain, group, pasid);
	if (ret)
		xa_erase(&group->pasid_array, pasid);
out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);

/**
 * iommu_detach_device_pasid() - Detach the domain from pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * The @domain must have been attached to @pasid of the @dev with
 * iommu_attach_device_pasid().
 */
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
			       ioasid_t pasid)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;

	mutex_lock(&group->mutex);
	__iommu_remove_group_pasid(group, pasid, domain);
	xa_erase(&group->pasid_array, pasid);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
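
/*
 * Editor's illustrative sketch, not part of iommu.c: attaching an unmanaged
 * domain to one PASID of a device and detaching it again. The example_* name
 * is hypothetical; a real caller (e.g. an SVA or iommufd path) obtains the
 * domain and PASID elsewhere and usually supplies a non-NULL attach handle.
 */
#if 0	/* illustration only */
static int example_bind_pasid(struct iommu_domain *domain, struct device *dev,
			      ioasid_t pasid)
{
	int ret;

	/* A NULL handle is accepted when fault delivery is not needed */
	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret)
		return ret;	/* e.g. -EOPNOTSUPP without set_dev_pasid */

	/* ... DMA tagged with @pasid now translates through @domain ... */

	iommu_detach_device_pasid(domain, dev, pasid);
	return 0;
}
#endif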

ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	int ret;

	/* max_pasids == 0 means that the device does not support PASID */
	if (!dev->iommu->max_pasids)
		return IOMMU_PASID_INVALID;

	/*
	 * max_pasids is set up by the vendor driver based on the number of
	 * supported PASID bits, but the ida allocation range is inclusive.
	 */
	ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
			      dev->iommu->max_pasids - 1, GFP_KERNEL);
	return ret < 0 ? IOMMU_PASID_INVALID : ret;
}
EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);
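
/*
 * Editor's illustrative sketch, not part of iommu.c: allocating a PASID that
 * is unique system-wide. iommu_free_global_pasid() is the matching release
 * helper defined elsewhere in this file; the example_* name is hypothetical.
 */
#if 0	/* illustration only */
static ioasid_t example_get_pasid(struct device *dev)
{
	ioasid_t pasid = iommu_alloc_global_pasid(dev);

	/* IOMMU_PASID_INVALID means the device has no usable PASID range */
	if (pasid != IOMMU_PASID_INVALID)
		dev_dbg(dev, "allocated global PASID %u\n", pasid);

	/* The caller later releases it with iommu_free_global_pasid(pasid) */
	return pasid;
}
#endif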

/**
 * iommu_attach_handle_get - Return the attach handle
 * @group: the iommu group that domain was attached to
 * @pasid: the pasid within the group
 * @type: matched domain type, (0 == any)
 *
 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch.
 *
 * The handle is only valid while the domain stays attached; callers must
 * synchronize this lookup against domain attachment and detachment.
 */
struct iommu_attach_handle *
iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
{
	struct iommu_attach_handle *handle;

	xa_lock(&group->pasid_array);
	handle = xa_load(&group->pasid_array, pasid);
	if (!handle)
		handle = ERR_PTR(-ENOENT);
	else if (type && handle->domain->type != type)
		handle = ERR_PTR(-EBUSY);
	xa_unlock(&group->pasid_array);

	return handle;
}
EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, IOMMUFD_INTERNAL);
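
/*
 * Editor's illustrative sketch, not part of iommu.c: how a fault-report path
 * could map a (group, pasid) pair back to the handle supplied at attach time.
 * The example_* name is hypothetical; the caller must keep the domain attached
 * for as long as it uses the returned handle.
 */
#if 0	/* illustration only */
static struct iommu_attach_handle *
example_find_handle(struct iommu_group *group, ioasid_t pasid)
{
	struct iommu_attach_handle *handle;

	handle = iommu_attach_handle_get(group, pasid, 0 /* any domain type */);
	if (IS_ERR(handle))
		return NULL;	/* -ENOENT: nothing attached at this PASID */
	return handle;
}
#endif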

/**
 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 * @handle: attach handle
 *
 * Returns 0 on success and error code on failure.
 *
 * This is a variant of iommu_attach_group(). It allows the caller to provide
 * an attach handle and use it when the domain is attached.
 */
int iommu_attach_group_handle(struct iommu_domain *domain,
			      struct iommu_group *group,
			      struct iommu_attach_handle *handle)
{
	int ret;

	if (handle)
		handle->domain = domain;

	mutex_lock(&group->mutex);
	ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
	if (ret)
		goto err_unlock;

	ret = __iommu_attach_group(domain, group);
	if (ret)
		goto err_erase;
	mutex_unlock(&group->mutex);

	return 0;
err_erase:
	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
err_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, IOMMUFD_INTERNAL);

/**
 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group
 * @domain: IOMMU domain to detach
 * @group: IOMMU group that will be detached
 *
 * Detach the specified IOMMU domain from the specified IOMMU group.
 * It must be used in conjunction with iommu_attach_group_handle().
 */
void iommu_detach_group_handle(struct iommu_domain *domain,
			       struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, IOMMUFD_INTERNAL);

/**
 * iommu_replace_group_handle - replace the domain that a group is attached to
 * @group: IOMMU group that will be attached to the new domain
 * @new_domain: new IOMMU domain to replace with
 * @handle: attach handle
 *
 * Replace the currently attached domain of @group with @new_domain and record
 * @handle for it, without forcing the group through the blocking domain
 * in-between. A NULL @new_domain is rejected.
 */
int iommu_replace_group_handle(struct iommu_group *group,
			       struct iommu_domain *new_domain,
			       struct iommu_attach_handle *handle)
{
	void *curr;
	int ret;

	if (!new_domain)
		return -EINVAL;

	mutex_lock(&group->mutex);
	if (handle) {
		ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
		if (ret)
			goto err_unlock;
		handle->domain = new_domain;
	}

	ret = __iommu_group_set_domain(group, new_domain);
	if (ret)
		goto err_release;

	curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
	WARN_ON(xa_is_err(curr));

	mutex_unlock(&group->mutex);

	return 0;
err_release:
	xa_release(&group->pasid_array, IOMMU_NO_PASID);
err_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, IOMMUFD_INTERNAL);
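
/*
 * Editor's illustrative sketch, not part of iommu.c: the handle-based group
 * attach/replace/detach sequence an iommufd-style caller might use. The
 * example_* name is hypothetical; @h1 and @h2 would normally be embedded in
 * caller-owned structures so fault reports can be mapped back to them.
 */
#if 0	/* illustration only */
static int example_switch_domains(struct iommu_group *group,
				  struct iommu_domain *d1,
				  struct iommu_domain *d2,
				  struct iommu_attach_handle *h1,
				  struct iommu_attach_handle *h2)
{
	int ret;

	ret = iommu_attach_group_handle(d1, group, h1);
	if (ret)
		return ret;

	/* Switch to d2 without dropping to the blocking domain in-between */
	ret = iommu_replace_group_handle(group, d2, h2);
	if (ret) {
		iommu_detach_group_handle(d1, group);
		return ret;
	}

	iommu_detach_group_handle(d2, group);
	return 0;
}
#endif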