Lines Matching full:group
74 #define for_each_group_device(group, pos) \ argument
75 list_for_each_entry(pos, &(group)->devices, list)
79 ssize_t (*show)(struct iommu_group *group, char *buf);
80 ssize_t (*store)(struct iommu_group *group,
99 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
103 struct iommu_group *group);
109 static int __iommu_device_set_domain(struct iommu_group *group,
113 static int __iommu_group_set_domain_internal(struct iommu_group *group,
116 static int __iommu_group_set_domain(struct iommu_group *group, in __iommu_group_set_domain() argument
119 return __iommu_group_set_domain_internal(group, new_domain, 0); in __iommu_group_set_domain()
121 static void __iommu_group_set_domain_nofail(struct iommu_group *group, in __iommu_group_set_domain_nofail() argument
125 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); in __iommu_group_set_domain_nofail()
128 static int iommu_setup_default_domain(struct iommu_group *group,
132 static ssize_t iommu_group_store_type(struct iommu_group *group,
134 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
136 static void __iommu_group_free_device(struct iommu_group *group,
406 struct iommu_group *group; in iommu_init_device() local
428 group = ops->device_group(dev); in iommu_init_device()
429 if (WARN_ON_ONCE(group == NULL)) in iommu_init_device()
430 group = ERR_PTR(-EINVAL); in iommu_init_device()
431 if (IS_ERR(group)) { in iommu_init_device()
432 ret = PTR_ERR(group); in iommu_init_device()
435 dev->iommu_group = group; in iommu_init_device()
457 struct iommu_group *group = dev->iommu_group; in iommu_deinit_device() local
460 lockdep_assert_held(&group->mutex); in iommu_deinit_device()
466 * If there are still other devices in the group they are not affected in iommu_deinit_device()
477 * If this is the last driver to use the group then we must free the in iommu_deinit_device()
480 if (list_empty(&group->devices)) { in iommu_deinit_device()
481 if (group->default_domain) { in iommu_deinit_device()
482 iommu_domain_free(group->default_domain); in iommu_deinit_device()
483 group->default_domain = NULL; in iommu_deinit_device()
485 if (group->blocking_domain) { in iommu_deinit_device()
486 iommu_domain_free(group->blocking_domain); in iommu_deinit_device()
487 group->blocking_domain = NULL; in iommu_deinit_device()
489 group->domain = NULL; in iommu_deinit_device()
504 struct iommu_group *group; in __iommu_probe_device() local
533 /* Device is probed already if in a group */ in __iommu_probe_device()
541 group = dev->iommu_group; in __iommu_probe_device()
542 gdev = iommu_group_alloc_device(group, dev); in __iommu_probe_device()
543 mutex_lock(&group->mutex); in __iommu_probe_device()
553 list_add_tail(&gdev->list, &group->devices); in __iommu_probe_device()
554 WARN_ON(group->default_domain && !group->domain); in __iommu_probe_device()
555 if (group->default_domain) in __iommu_probe_device()
556 iommu_create_device_direct_mappings(group->default_domain, dev); in __iommu_probe_device()
557 if (group->domain) { in __iommu_probe_device()
558 ret = __iommu_device_set_domain(group, dev, group->domain, 0); in __iommu_probe_device()
561 } else if (!group->default_domain && !group_list) { in __iommu_probe_device()
562 ret = iommu_setup_default_domain(group, 0); in __iommu_probe_device()
565 } else if (!group->default_domain) { in __iommu_probe_device()
571 if (list_empty(&group->entry)) in __iommu_probe_device()
572 list_add_tail(&group->entry, group_list); in __iommu_probe_device()
574 mutex_unlock(&group->mutex); in __iommu_probe_device()
583 __iommu_group_free_device(group, gdev); in __iommu_probe_device()
586 mutex_unlock(&group->mutex); in __iommu_probe_device()
587 iommu_group_put(group); in __iommu_probe_device()
610 static void __iommu_group_free_device(struct iommu_group *group, in __iommu_group_free_device() argument
615 sysfs_remove_link(group->devices_kobj, grp_dev->name); in __iommu_group_free_device()
618 trace_remove_device_from_group(group->id, dev); in __iommu_group_free_device()
621 * If the group has become empty then ownership must have been in __iommu_group_free_device()
625 if (list_empty(&group->devices)) in __iommu_group_free_device()
626 WARN_ON(group->owner_cnt || in __iommu_group_free_device()
627 group->domain != group->default_domain); in __iommu_group_free_device()
636 struct iommu_group *group = dev->iommu_group; in __iommu_group_remove_device() local
639 mutex_lock(&group->mutex); in __iommu_group_remove_device()
640 for_each_group_device(group, device) { in __iommu_group_remove_device()
645 __iommu_group_free_device(group, device); in __iommu_group_remove_device()
652 mutex_unlock(&group->mutex); in __iommu_group_remove_device()
658 iommu_group_put(group); in __iommu_group_remove_device()
663 struct iommu_group *group = dev->iommu_group; in iommu_release_device() local
665 if (group) in iommu_release_device()
712 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_show() local
716 ret = attr->show(group, buf); in iommu_group_attr_show()
725 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_store() local
729 ret = attr->store(group, buf, count); in iommu_group_attr_store()
738 static int iommu_group_create_file(struct iommu_group *group, in iommu_group_create_file() argument
741 return sysfs_create_file(&group->kobj, &attr->attr); in iommu_group_create_file()
744 static void iommu_group_remove_file(struct iommu_group *group, in iommu_group_remove_file() argument
747 sysfs_remove_file(&group->kobj, &attr->attr); in iommu_group_remove_file()
750 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) in iommu_group_show_name() argument
752 return sysfs_emit(buf, "%s\n", group->name); in iommu_group_show_name()
831 int iommu_get_group_resv_regions(struct iommu_group *group, in iommu_get_group_resv_regions() argument
837 mutex_lock(&group->mutex); in iommu_get_group_resv_regions()
838 for_each_group_device(group, device) { in iommu_get_group_resv_regions()
855 mutex_unlock(&group->mutex); in iommu_get_group_resv_regions()
860 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, in iommu_group_show_resv_regions() argument
868 iommu_get_group_resv_regions(group, &group_resv_regions); in iommu_group_show_resv_regions()
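The sysfs reserved_regions attribute above is one in-tree consumer of iommu_get_group_resv_regions(). A minimal sketch of another consumer follows; my_dump_resv_regions() is a hypothetical name, and the caller frees the returned entries itself, as existing users do:

    #include <linux/iommu.h>

    static int my_dump_resv_regions(struct iommu_group *group)
    {
            struct iommu_resv_region *region, *next;
            LIST_HEAD(resv_regions);
            int ret;

            ret = iommu_get_group_resv_regions(group, &resv_regions);
            if (ret)
                    return ret;

            list_for_each_entry(region, &resv_regions, list)
                    pr_info("group %d: resv region %pa + %zx, type %d\n",
                            iommu_group_id(group), &region->start,
                            region->length, region->type);

            /* the API allocates the entries; the caller frees them */
            list_for_each_entry_safe(region, next, &resv_regions, list)
                    kfree(region);
            return 0;
    }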
882 static ssize_t iommu_group_show_type(struct iommu_group *group, in iommu_group_show_type() argument
887 mutex_lock(&group->mutex); in iommu_group_show_type()
888 if (group->default_domain) { in iommu_group_show_type()
889 switch (group->default_domain->type) { in iommu_group_show_type()
907 mutex_unlock(&group->mutex); in iommu_group_show_type()
922 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_release() local
924 pr_debug("Releasing group %d\n", group->id); in iommu_group_release()
926 if (group->iommu_data_release) in iommu_group_release()
927 group->iommu_data_release(group->iommu_data); in iommu_group_release()
929 ida_free(&iommu_group_ida, group->id); in iommu_group_release()
932 WARN_ON(group->default_domain); in iommu_group_release()
933 WARN_ON(group->blocking_domain); in iommu_group_release()
935 kfree(group->name); in iommu_group_release()
936 kfree(group); in iommu_group_release()
945 * iommu_group_alloc - Allocate a new group
948 * group. The iommu group represents the minimum granularity of the iommu.
950 * group in order to hold the group until devices are added. Use
952 * group to be automatically reclaimed once it has no devices or external
957 struct iommu_group *group; in iommu_group_alloc() local
960 group = kzalloc(sizeof(*group), GFP_KERNEL); in iommu_group_alloc()
961 if (!group) in iommu_group_alloc()
964 group->kobj.kset = iommu_group_kset; in iommu_group_alloc()
965 mutex_init(&group->mutex); in iommu_group_alloc()
966 INIT_LIST_HEAD(&group->devices); in iommu_group_alloc()
967 INIT_LIST_HEAD(&group->entry); in iommu_group_alloc()
968 xa_init(&group->pasid_array); in iommu_group_alloc()
972 kfree(group); in iommu_group_alloc()
975 group->id = ret; in iommu_group_alloc()
977 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, in iommu_group_alloc()
978 NULL, "%d", group->id); in iommu_group_alloc()
980 kobject_put(&group->kobj); in iommu_group_alloc()
984 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); in iommu_group_alloc()
985 if (!group->devices_kobj) { in iommu_group_alloc()
986 kobject_put(&group->kobj); /* triggers .release & free */ in iommu_group_alloc()
991 * The devices_kobj holds a reference on the group kobject, so in iommu_group_alloc()
992 * as long as that exists so will the group. We can therefore in iommu_group_alloc()
995 kobject_put(&group->kobj); in iommu_group_alloc()
997 ret = iommu_group_create_file(group, in iommu_group_alloc()
1000 kobject_put(group->devices_kobj); in iommu_group_alloc()
1004 ret = iommu_group_create_file(group, &iommu_group_attr_type); in iommu_group_alloc()
1006 kobject_put(group->devices_kobj); in iommu_group_alloc()
1010 pr_debug("Allocated group %d\n", group->id); in iommu_group_alloc()
1012 return group; in iommu_group_alloc()
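Taken with the kerneldoc fragment above, the lifetime rule is: iommu_group_alloc() returns a group with an extra reference that the caller must drop with iommu_group_put() once devices have been added. A hedged sketch of the usual driver-side pattern (my_setup_group() is a hypothetical name):

    #include <linux/iommu.h>

    static int my_setup_group(struct device *dev)
    {
            struct iommu_group *group;
            int ret;

            group = iommu_group_alloc();    /* returns ERR_PTR() on failure */
            if (IS_ERR(group))
                    return PTR_ERR(group);

            ret = iommu_group_add_device(group, dev); /* takes its own ref */

            /*
             * Drop the allocation reference; the group now lives as long
             * as its devices (or other external references) do.
             */
            iommu_group_put(group);
            return ret;
    }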
1017 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
1018 * @group: the group
1020 * iommu drivers can store data in the group for use when doing iommu
1022 * should hold a group reference.
1024 void *iommu_group_get_iommudata(struct iommu_group *group) in iommu_group_get_iommudata() argument
1026 return group->iommu_data; in iommu_group_get_iommudata()
1031 * iommu_group_set_iommudata - set iommu_data for a group
1032 * @group: the group
1036 * iommu drivers can store data in the group for use when doing iommu
1038 * the group has been allocated. Caller should hold a group reference.
1040 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, in iommu_group_set_iommudata() argument
1043 group->iommu_data = iommu_data; in iommu_group_set_iommudata()
1044 group->iommu_data_release = release; in iommu_group_set_iommudata()
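As the comment notes, iommu_data is an opaque per-group cookie for the IOMMU driver. A minimal sketch, assuming a hypothetical my_group_data structure; the release callback runs from iommu_group_release() when the last group reference is dropped:

    struct my_group_data {
            u32 context_id;         /* hypothetical per-group driver state */
    };

    static void my_group_data_release(void *iommu_data)
    {
            kfree(iommu_data);
    }

    static int my_attach_group_data(struct iommu_group *group)
    {
            struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

            if (!data)
                    return -ENOMEM;
            iommu_group_set_iommudata(group, data, my_group_data_release);
            return 0;
    }

    /* later, e.g. while attaching a domain: */
    /* struct my_group_data *data = iommu_group_get_iommudata(group); */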
1049 * iommu_group_set_name - set name for a group
1050 * @group: the group
1053 * Allow iommu driver to set a name for a group. When set it will
1054 * appear in a name attribute file under the group in sysfs.
1056 int iommu_group_set_name(struct iommu_group *group, const char *name) in iommu_group_set_name() argument
1060 if (group->name) { in iommu_group_set_name()
1061 iommu_group_remove_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1062 kfree(group->name); in iommu_group_set_name()
1063 group->name = NULL; in iommu_group_set_name()
1068 group->name = kstrdup(name, GFP_KERNEL); in iommu_group_set_name()
1069 if (!group->name) in iommu_group_set_name()
1072 ret = iommu_group_create_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1074 kfree(group->name); in iommu_group_set_name()
1075 group->name = NULL; in iommu_group_set_name()
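A short usage note: the name set here appears as the name attribute under /sys/kernel/iommu_groups/<id>/. VFIO's no-IOMMU mode uses this call, roughly:

    ret = iommu_group_set_name(group, "vfio-noiommu");
    if (ret)        /* kstrdup() failure yields -ENOMEM */
            return ret;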
1150 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, in iommu_group_alloc_device() argument
1162 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); in iommu_group_alloc_device()
1173 ret = sysfs_create_link_nowarn(group->devices_kobj, in iommu_group_alloc_device()
1189 trace_add_device_to_group(group->id, dev); in iommu_group_alloc_device()
1191 dev_info(dev, "Adding to iommu group %d\n", group->id); in iommu_group_alloc_device()
1201 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); in iommu_group_alloc_device()
1206 * iommu_group_add_device - add a device to an iommu group
1207 * @group: the group into which to add the device (reference should be held)
1211 * group. Adding a device increments the group reference count.
1213 int iommu_group_add_device(struct iommu_group *group, struct device *dev) in iommu_group_add_device() argument
1217 gdev = iommu_group_alloc_device(group, dev); in iommu_group_add_device()
1221 iommu_group_ref_get(group); in iommu_group_add_device()
1222 dev->iommu_group = group; in iommu_group_add_device()
1224 mutex_lock(&group->mutex); in iommu_group_add_device()
1225 list_add_tail(&gdev->list, &group->devices); in iommu_group_add_device()
1226 mutex_unlock(&group->mutex); in iommu_group_add_device()
1232 * iommu_group_remove_device - remove a device from its current group
1236 * its current group. This decrements the iommu group reference count.
1240 struct iommu_group *group = dev->iommu_group; in iommu_group_remove_device() local
1242 if (!group) in iommu_group_remove_device()
1245 dev_info(dev, "Removing from iommu group %d\n", group->id); in iommu_group_remove_device()
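The teardown counterpart to the allocation sketch after iommu_group_alloc() above; note it takes only the device, since dev->iommu_group records the membership:

    static void my_teardown_group(struct device *dev)
    {
            /*
             * Drops the reference taken by iommu_group_add_device() and
             * removes the sysfs links; the group itself is released once
             * its last reference is gone.
             */
            iommu_group_remove_device(dev);
    }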
1251 static struct device *iommu_group_first_dev(struct iommu_group *group) in iommu_group_first_dev() argument
1253 lockdep_assert_held(&group->mutex); in iommu_group_first_dev()
1254 return list_first_entry(&group->devices, struct group_device, list)->dev; in iommu_group_first_dev()
1258 * iommu_group_for_each_dev - iterate over each device in the group
1259 * @group: the group
1263 * This function is called by group users to iterate over group devices.
1264 * Callers should hold a reference count to the group during callback.
1265 * The group->mutex is held across callbacks, which will block calls to
1268 int iommu_group_for_each_dev(struct iommu_group *group, void *data, in iommu_group_for_each_dev() argument
1274 mutex_lock(&group->mutex); in iommu_group_for_each_dev()
1275 for_each_group_device(group, device) { in iommu_group_for_each_dev()
1280 mutex_unlock(&group->mutex); in iommu_group_for_each_dev()
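A hedged sketch of a group-walk callback (my_count_one() is hypothetical); a non-zero return from fn aborts the walk and is propagated to the caller, and because group->mutex is held across callbacks, fn must not call anything that takes it:

    static int my_count_one(struct device *dev, void *data)
    {
            int *count = data;

            (*count)++;
            return 0;       /* non-zero would stop the iteration */
    }

    static int my_count_group_devices(struct iommu_group *group)
    {
            int count = 0;

            iommu_group_for_each_dev(group, &count, my_count_one);
            return count;
    }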
1287 * iommu_group_get - Return the group for a device and increment reference
1288 * @dev: get the group that this device belongs to
1290 * This function is called by iommu drivers and users to get the group
1291 * for the specified device. If found, the group is returned and the group
1296 struct iommu_group *group = dev->iommu_group; in iommu_group_get() local
1298 if (group) in iommu_group_get()
1299 kobject_get(group->devices_kobj); in iommu_group_get()
1301 return group; in iommu_group_get()
1306 * iommu_group_ref_get - Increment reference on a group
1307 * @group: the group to use, must not be NULL
1310 * existing group. Returns the given group for convenience.
1312 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) in iommu_group_ref_get() argument
1314 kobject_get(group->devices_kobj); in iommu_group_ref_get()
1315 return group; in iommu_group_ref_get()
1320 * iommu_group_put - Decrement group reference
1321 * @group: the group to use
1324 * iommu group. Once the reference count is zero, the group is released.
1326 void iommu_group_put(struct iommu_group *group) in iommu_group_put() argument
1328 if (group) in iommu_group_put()
1329 kobject_put(group->devices_kobj); in iommu_group_put()
1518 * matched using the group ID, the PASID valid bit and the PASID in iommu_page_response()
1519 * value. Otherwise only the group ID matches request and in iommu_page_response()
1545 * iommu_group_id - Return ID for a group
1546 * @group: the group to ID
1548 * Return the unique ID for the group matching the sysfs group number.
1550 int iommu_group_id(struct iommu_group *group) in iommu_group_id() argument
1552 return group->id; in iommu_group_id()
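iommu_group_get(), iommu_group_ref_get(), iommu_group_put() and iommu_group_id() form the reference-counting API as seen from a driver. A minimal sketch:

    static void my_report_group(struct device *dev)
    {
            struct iommu_group *group = iommu_group_get(dev); /* +1 ref, may be NULL */

            if (!group)
                    return;
            dev_info(dev, "member of iommu group %d\n", iommu_group_id(group));
            iommu_group_put(group);                           /* -1 ref */
    }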
1573 * that may already have a group.
1579 struct iommu_group *group; in get_pci_function_alias_group() local
1590 group = get_pci_alias_group(tmp, devfns); in get_pci_function_alias_group()
1591 if (group) { in get_pci_function_alias_group()
1593 return group; in get_pci_function_alias_group()
1613 struct iommu_group *group; in get_pci_alias_group() local
1618 group = iommu_group_get(&pdev->dev); in get_pci_alias_group()
1619 if (group) in get_pci_alias_group()
1620 return group; in get_pci_alias_group()
1628 group = get_pci_alias_group(tmp, devfns); in get_pci_alias_group()
1629 if (group) { in get_pci_alias_group()
1631 return group; in get_pci_alias_group()
1634 group = get_pci_function_alias_group(tmp, devfns); in get_pci_alias_group()
1635 if (group) { in get_pci_alias_group()
1637 return group; in get_pci_alias_group()
1647 struct iommu_group *group; member
1652 * the IOMMU group if we find one along the way.
1659 data->group = iommu_group_get(&pdev->dev); in get_pci_alias_or_group()
1661 return data->group != NULL; in get_pci_alias_or_group()
1666 * iommu-group per device.
1676 * iommu-group per iommu driver instance shared by every device
1684 struct iommu_group *group; in generic_single_device_group() local
1686 group = iommu_group_alloc(); in generic_single_device_group()
1687 if (IS_ERR(group)) in generic_single_device_group()
1688 return group; in generic_single_device_group()
1689 iommu->singleton_group = group; in generic_single_device_group()
1697 * to find or create an IOMMU group for a device.
1704 struct iommu_group *group = NULL; in pci_device_group() local
1712 * be aliased due to topology in order to have its own IOMMU group. in pci_device_group()
1714 * group, use it. in pci_device_group()
1717 return data.group; in pci_device_group()
1725 * group, use it. in pci_device_group()
1736 group = iommu_group_get(&pdev->dev); in pci_device_group()
1737 if (group) in pci_device_group()
1738 return group; in pci_device_group()
1743 * device or another device aliases us, use the same group. in pci_device_group()
1745 group = get_pci_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1746 if (group) in pci_device_group()
1747 return group; in pci_device_group()
1754 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1755 if (group) in pci_device_group()
1756 return group; in pci_device_group()
1758 /* No shared group found, allocate new */ in pci_device_group()
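Drivers normally do not open-code this alias analysis; they point their iommu_ops at the generic helpers. A sketch of the conventional ->device_group wiring for a hypothetical driver (the same pattern as intel-iommu and arm-smmu):

    static struct iommu_group *my_device_group(struct device *dev)
    {
            /* PCI devices need alias/ACS analysis; others get a group each */
            if (dev_is_pci(dev))
                    return pci_device_group(dev);
            return generic_device_group(dev);
    }

    static const struct iommu_ops my_iommu_ops = {
            /* ... */
            .device_group = my_device_group,
    };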
1763 /* Get the IOMMU group for device on fsl-mc bus */
1767 struct iommu_group *group; in fsl_mc_device_group() local
1769 group = iommu_group_get(cont_dev); in fsl_mc_device_group()
1770 if (!group) in fsl_mc_device_group()
1771 group = iommu_group_alloc(); in fsl_mc_device_group()
1772 return group; in fsl_mc_device_group()
1777 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in __iommu_group_alloc_default_domain() argument
1779 if (group->default_domain && group->default_domain->type == req_type) in __iommu_group_alloc_default_domain()
1780 return group->default_domain; in __iommu_group_alloc_default_domain()
1781 return __iommu_group_domain_alloc(group, req_type); in __iommu_group_alloc_default_domain()
1789 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in iommu_group_alloc_default_domain() argument
1791 const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); in iommu_group_alloc_default_domain()
1794 lockdep_assert_held(&group->mutex); in iommu_group_alloc_default_domain()
1808 return __iommu_group_alloc_default_domain(group, req_type); in iommu_group_alloc_default_domain()
1811 dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); in iommu_group_alloc_default_domain()
1818 dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); in iommu_group_alloc_default_domain()
1822 …pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_D… in iommu_group_alloc_default_domain()
1823 iommu_def_domain_type, group->name); in iommu_group_alloc_default_domain()
1827 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) in iommu_group_default_domain() argument
1829 return group->default_domain; in iommu_group_default_domain()
1866 * group. Drivers must give a consistent result.
1868 static int iommu_get_def_domain_type(struct iommu_group *group, in iommu_get_def_domain_type() argument
1893 …"IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n… in iommu_get_def_domain_type()
1895 group->id); in iommu_get_def_domain_type()
1910 static int iommu_get_default_domain_type(struct iommu_group *group, in iommu_get_default_domain_type() argument
1917 lockdep_assert_held(&group->mutex); in iommu_get_default_domain_type()
1931 for_each_group_device(group, gdev) { in iommu_get_default_domain_type()
1932 driver_type = iommu_get_def_domain_type(group, gdev->dev, in iommu_get_default_domain_type()
1962 "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n", in iommu_get_default_domain_type()
1963 group->id, iommu_domain_type_str(driver_type)); in iommu_get_default_domain_type()
1987 struct iommu_group *group, *next; in bus_iommu_probe() local
1995 list_for_each_entry_safe(group, next, &group_list, entry) { in bus_iommu_probe()
1998 mutex_lock(&group->mutex); in bus_iommu_probe()
2001 list_del_init(&group->entry); in bus_iommu_probe()
2005 * that the cross-group default domain type and the setup of the in bus_iommu_probe()
2008 ret = iommu_setup_default_domain(group, 0); in bus_iommu_probe()
2010 mutex_unlock(&group->mutex); in bus_iommu_probe()
2013 mutex_unlock(&group->mutex); in bus_iommu_probe()
2019 * to take group->mutex, resulting in a deadlock. in bus_iommu_probe()
2021 for_each_group_device(group, gdev) in bus_iommu_probe()
2078 * for a group
2079 * @group: Group to query
2082 * msi_device_has_isolated_msi() for devices in a group. However nothing
2086 bool iommu_group_has_isolated_msi(struct iommu_group *group) in iommu_group_has_isolated_msi() argument
2091 mutex_lock(&group->mutex); in iommu_group_has_isolated_msi()
2092 for_each_group_device(group, group_dev) in iommu_group_has_isolated_msi()
2094 mutex_unlock(&group->mutex); in iommu_group_has_isolated_msi()
2175 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) in __iommu_group_domain_alloc() argument
2177 struct device *dev = iommu_group_first_dev(group); in __iommu_group_domain_alloc()
2225 * Put the group's domain back to the appropriate core-owned domain - either the
2228 static void __iommu_group_set_core_domain(struct iommu_group *group) in __iommu_group_set_core_domain() argument
2232 if (group->owner) in __iommu_group_set_core_domain()
2233 new_domain = group->blocking_domain; in __iommu_group_set_core_domain()
2235 new_domain = group->default_domain; in __iommu_group_set_core_domain()
2237 __iommu_group_set_domain_nofail(group, new_domain); in __iommu_group_set_core_domain()
2271 struct iommu_group *group = dev->iommu_group; in iommu_attach_device() local
2274 if (!group) in iommu_attach_device()
2278 * Lock the group to make sure the device-count doesn't in iommu_attach_device()
2281 mutex_lock(&group->mutex); in iommu_attach_device()
2283 if (list_count_nodes(&group->devices) != 1) in iommu_attach_device()
2286 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2289 mutex_unlock(&group->mutex); in iommu_attach_device()
2305 struct iommu_group *group = dev->iommu_group; in iommu_detach_device() local
2307 if (!group) in iommu_detach_device()
2310 mutex_lock(&group->mutex); in iommu_detach_device()
2311 if (WARN_ON(domain != group->domain) || in iommu_detach_device()
2312 WARN_ON(list_count_nodes(&group->devices) != 1)) in iommu_detach_device()
2314 __iommu_group_set_core_domain(group); in iommu_detach_device()
2317 mutex_unlock(&group->mutex); in iommu_detach_device()
2324 struct iommu_group *group = dev->iommu_group; in iommu_get_domain_for_dev() local
2326 if (!group) in iommu_get_domain_for_dev()
2329 return group->domain; in iommu_get_domain_for_dev()
2335 * guarantees that the group and its default domain are valid and correct.
2343 struct iommu_group *group) in __iommu_attach_group() argument
2347 if (group->domain && group->domain != group->default_domain && in __iommu_attach_group()
2348 group->domain != group->blocking_domain) in __iommu_attach_group()
2351 dev = iommu_group_first_dev(group); in __iommu_attach_group()
2355 return __iommu_group_set_domain(group, domain); in __iommu_attach_group()
2359 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2361 * @group: IOMMU group that will be attached
2367 * the group. In this case attaching a different domain to the
2368 * group may succeed.
2370 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2374 mutex_lock(&group->mutex); in iommu_attach_group()
2375 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2376 mutex_unlock(&group->mutex); in iommu_attach_group()
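A hedged end-to-end sketch of the classic VFIO-style flow: allocate an unmanaged domain, attach the whole group, map, then detach and free. Signatures are assumed for this vintage of the file (iommu_domain_alloc() taking a bus pointer, iommu_map() taking a gfp_t):

    static int my_map_one_page(struct device *dev, struct iommu_group *group,
                               unsigned long iova, phys_addr_t paddr)
    {
            struct iommu_domain *domain;
            int ret;

            domain = iommu_domain_alloc(dev->bus);  /* IOMMU_DOMAIN_UNMANAGED */
            if (!domain)
                    return -ENOMEM;

            ret = iommu_attach_group(domain, group);
            if (ret)        /* e.g. -EBUSY while a non-core domain is attached */
                    goto out_free;

            ret = iommu_map(domain, iova, paddr, SZ_4K,
                            IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

            iommu_detach_group(domain, group);
    out_free:
            iommu_domain_free(domain);
            return ret;
    }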
2383 * iommu_group_replace_domain - replace the domain that a group is attached to
2385 * @group: IOMMU group that will be attached to the new domain
2387 * This API allows the group to switch domains without being forced to go to
2393 int iommu_group_replace_domain(struct iommu_group *group, in iommu_group_replace_domain() argument
2401 mutex_lock(&group->mutex); in iommu_group_replace_domain()
2402 ret = __iommu_group_set_domain(group, new_domain); in iommu_group_replace_domain()
2403 mutex_unlock(&group->mutex); in iommu_group_replace_domain()
2408 static int __iommu_device_set_domain(struct iommu_group *group, in __iommu_device_set_domain() argument
2424 new_domain == group->blocking_domain)) { in __iommu_device_set_domain()
2431 if (new_domain == group->default_domain) in __iommu_device_set_domain()
2444 group->blocking_domain && in __iommu_device_set_domain()
2445 group->blocking_domain != new_domain) in __iommu_device_set_domain()
2446 __iommu_attach_device(group->blocking_domain, dev); in __iommu_device_set_domain()
2453 * If 0 is returned the group's domain is new_domain. If an error is returned
2454 * then the group's domain will be set back to the existing domain unless
2455 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
2461 * devices in a group. Ideally we'd have a single device which represents the
2462 * requestor ID of the group, but we also allow IOMMU drivers to create policy
2464 * members, but we wish to group them at a higher level (ex. untrusted
2467 static int __iommu_group_set_domain_internal(struct iommu_group *group, in __iommu_group_set_domain_internal() argument
2476 lockdep_assert_held(&group->mutex); in __iommu_group_set_domain_internal()
2478 if (group->domain == new_domain) in __iommu_group_set_domain_internal()
2488 * either new_domain or group->domain, never something else. in __iommu_group_set_domain_internal()
2491 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2492 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, in __iommu_group_set_domain_internal()
2497 * Keep trying the other devices in the group. If a in __iommu_group_set_domain_internal()
2508 group->domain = new_domain; in __iommu_group_set_domain_internal()
2517 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2520 * we leave group->domain as NULL and let release clean in __iommu_group_set_domain_internal()
2523 if (group->domain) in __iommu_group_set_domain_internal()
2525 group, gdev->dev, group->domain, in __iommu_group_set_domain_internal()
2533 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2535 mutex_lock(&group->mutex); in iommu_detach_group()
2536 __iommu_group_set_core_domain(group); in iommu_detach_group()
2537 mutex_unlock(&group->mutex); in iommu_detach_group()
3099 * iommu_setup_default_domain - Set the default_domain for the group
3100 * @group: Group to change
3103 * Allocate a default domain and set it as the current domain on the group. If
3104 * the group already has a default domain it will be changed to the target_type.
3108 static int iommu_setup_default_domain(struct iommu_group *group, in iommu_setup_default_domain() argument
3111 struct iommu_domain *old_dom = group->default_domain; in iommu_setup_default_domain()
3118 lockdep_assert_held(&group->mutex); in iommu_setup_default_domain()
3120 req_type = iommu_get_default_domain_type(group, target_type); in iommu_setup_default_domain()
3124 dom = iommu_group_alloc_default_domain(group, req_type); in iommu_setup_default_domain()
3128 if (group->default_domain == dom) in iommu_setup_default_domain()
3137 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
3147 group->default_domain = dom; in iommu_setup_default_domain()
3148 if (!group->domain) { in iommu_setup_default_domain()
3153 * in group->default_domain so it is freed after. in iommu_setup_default_domain()
3156 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
3160 ret = __iommu_group_set_domain(group, dom); in iommu_setup_default_domain()
3172 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
3187 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
3191 group->default_domain = old_dom; in iommu_setup_default_domain()
3198 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3202 * group->mutex is used here to guarantee that the device release path
3205 static ssize_t iommu_group_store_type(struct iommu_group *group, in iommu_group_store_type() argument
3214 if (WARN_ON(!group) || !group->default_domain) in iommu_group_store_type()
3228 mutex_lock(&group->mutex); in iommu_group_store_type()
3231 group->default_domain->type == IOMMU_DOMAIN_DMA) { in iommu_group_store_type()
3232 ret = iommu_dma_init_fq(group->default_domain); in iommu_group_store_type()
3236 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; in iommu_group_store_type()
3242 if (list_empty(&group->devices) || group->owner_cnt) { in iommu_group_store_type()
3247 ret = iommu_setup_default_domain(group, req_type); in iommu_group_store_type()
3255 * group->mutex, resulting in a deadlock. in iommu_group_store_type()
3257 mutex_unlock(&group->mutex); in iommu_group_store_type()
3260 for_each_group_device(group, gdev) in iommu_group_store_type()
3265 mutex_unlock(&group->mutex); in iommu_group_store_type()
3280 struct iommu_group *group = dev->iommu_group; in iommu_device_use_default_domain() local
3283 if (!group) in iommu_device_use_default_domain()
3286 mutex_lock(&group->mutex); in iommu_device_use_default_domain()
3287 if (group->owner_cnt) { in iommu_device_use_default_domain()
3288 if (group->domain != group->default_domain || group->owner || in iommu_device_use_default_domain()
3289 !xa_empty(&group->pasid_array)) { in iommu_device_use_default_domain()
3295 group->owner_cnt++; in iommu_device_use_default_domain()
3298 mutex_unlock(&group->mutex); in iommu_device_use_default_domain()
3313 struct iommu_group *group = dev->iommu_group; in iommu_device_unuse_default_domain() local
3315 if (!group) in iommu_device_unuse_default_domain()
3318 mutex_lock(&group->mutex); in iommu_device_unuse_default_domain()
3319 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) in iommu_device_unuse_default_domain()
3320 group->owner_cnt--; in iommu_device_unuse_default_domain()
3322 mutex_unlock(&group->mutex); in iommu_device_unuse_default_domain()
3325 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) in __iommu_group_alloc_blocking_domain() argument
3329 if (group->blocking_domain) in __iommu_group_alloc_blocking_domain()
3332 domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); in __iommu_group_alloc_blocking_domain()
3338 domain = __iommu_group_domain_alloc(group, in __iommu_group_alloc_blocking_domain()
3343 group->blocking_domain = domain; in __iommu_group_alloc_blocking_domain()
3347 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) in __iommu_take_dma_ownership() argument
3351 if ((group->domain && group->domain != group->default_domain) || in __iommu_take_dma_ownership()
3352 !xa_empty(&group->pasid_array)) in __iommu_take_dma_ownership()
3355 ret = __iommu_group_alloc_blocking_domain(group); in __iommu_take_dma_ownership()
3358 ret = __iommu_group_set_domain(group, group->blocking_domain); in __iommu_take_dma_ownership()
3362 group->owner = owner; in __iommu_take_dma_ownership()
3363 group->owner_cnt++; in __iommu_take_dma_ownership()
3368 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3369 * @group: The group.
3374 * prohibited. Only a single owner may exist for a group.
3376 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) in iommu_group_claim_dma_owner() argument
3383 mutex_lock(&group->mutex); in iommu_group_claim_dma_owner()
3384 if (group->owner_cnt) { in iommu_group_claim_dma_owner()
3389 ret = __iommu_take_dma_ownership(group, owner); in iommu_group_claim_dma_owner()
3391 mutex_unlock(&group->mutex); in iommu_group_claim_dma_owner()
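A sketch of the ownership handshake a userspace-DMA driver performs; the owner argument is an opaque cookie used only to tell competing claimants apart (VFIO passes a pointer tied to the opened file):

    static int my_open_group(struct iommu_group *group, void *my_cookie)
    {
            int ret;

            /* moves the group onto its blocking domain and marks it owned */
            ret = iommu_group_claim_dma_owner(group, my_cookie);
            if (ret)
                    return ret;     /* e.g. -EBUSY if already claimed */

            /* ... attach a user-controlled domain, run DMA ... */

            /* puts the group back onto its default domain */
            iommu_group_release_dma_owner(group);
            return 0;
    }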
3402 * Claim the DMA ownership of a device. Multiple devices in the same group may
3409 struct iommu_group *group = dev->iommu_group; in iommu_device_claim_dma_owner() local
3415 if (!group) in iommu_device_claim_dma_owner()
3418 mutex_lock(&group->mutex); in iommu_device_claim_dma_owner()
3419 if (group->owner_cnt) { in iommu_device_claim_dma_owner()
3420 if (group->owner != owner) { in iommu_device_claim_dma_owner()
3424 group->owner_cnt++; in iommu_device_claim_dma_owner()
3428 ret = __iommu_take_dma_ownership(group, owner); in iommu_device_claim_dma_owner()
3430 mutex_unlock(&group->mutex); in iommu_device_claim_dma_owner()
3435 static void __iommu_release_dma_ownership(struct iommu_group *group) in __iommu_release_dma_ownership() argument
3437 if (WARN_ON(!group->owner_cnt || !group->owner || in __iommu_release_dma_ownership()
3438 !xa_empty(&group->pasid_array))) in __iommu_release_dma_ownership()
3441 group->owner_cnt = 0; in __iommu_release_dma_ownership()
3442 group->owner = NULL; in __iommu_release_dma_ownership()
3443 __iommu_group_set_domain_nofail(group, group->default_domain); in __iommu_release_dma_ownership()
3447 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3448 * @group: The group
3452 void iommu_group_release_dma_owner(struct iommu_group *group) in iommu_group_release_dma_owner() argument
3454 mutex_lock(&group->mutex); in iommu_group_release_dma_owner()
3455 __iommu_release_dma_ownership(group); in iommu_group_release_dma_owner()
3456 mutex_unlock(&group->mutex); in iommu_group_release_dma_owner()
3469 struct iommu_group *group = dev->iommu_group; in iommu_device_release_dma_owner() local
3471 mutex_lock(&group->mutex); in iommu_device_release_dma_owner()
3472 if (group->owner_cnt > 1) in iommu_device_release_dma_owner()
3473 group->owner_cnt--; in iommu_device_release_dma_owner()
3475 __iommu_release_dma_ownership(group); in iommu_device_release_dma_owner()
3476 mutex_unlock(&group->mutex); in iommu_device_release_dma_owner()
3481 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3482 * @group: The group.
3484 * This provides status query on a given group. It is racy and only for
3487 bool iommu_group_dma_owner_claimed(struct iommu_group *group) in iommu_group_dma_owner_claimed() argument
3491 mutex_lock(&group->mutex); in iommu_group_dma_owner_claimed()
3492 user = group->owner_cnt; in iommu_group_dma_owner_claimed()
3493 mutex_unlock(&group->mutex); in iommu_group_dma_owner_claimed()
3500 struct iommu_group *group, ioasid_t pasid) in __iommu_set_group_pasid() argument
3505 for_each_group_device(group, device) { in __iommu_set_group_pasid()
3514 static void __iommu_remove_group_pasid(struct iommu_group *group, in __iommu_remove_group_pasid() argument
3520 for_each_group_device(group, device) { in __iommu_remove_group_pasid()
3538 struct iommu_group *group = dev->iommu_group; in iommu_attach_device_pasid() local
3545 if (!group) in iommu_attach_device_pasid()
3551 mutex_lock(&group->mutex); in iommu_attach_device_pasid()
3552 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); in iommu_attach_device_pasid()
3558 ret = __iommu_set_group_pasid(domain, group, pasid); in iommu_attach_device_pasid()
3560 __iommu_remove_group_pasid(group, pasid); in iommu_attach_device_pasid()
3561 xa_erase(&group->pasid_array, pasid); in iommu_attach_device_pasid()
3564 mutex_unlock(&group->mutex); in iommu_attach_device_pasid()
3582 struct iommu_group *group = dev->iommu_group; in iommu_detach_device_pasid() local
3584 mutex_lock(&group->mutex); in iommu_detach_device_pasid()
3585 __iommu_remove_group_pasid(group, pasid); in iommu_detach_device_pasid()
3586 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); in iommu_detach_device_pasid()
3587 mutex_unlock(&group->mutex); in iommu_detach_device_pasid()
3610 struct iommu_group *group = dev->iommu_group; in iommu_get_domain_for_dev_pasid() local
3613 if (!group) in iommu_get_domain_for_dev_pasid()
3616 xa_lock(&group->pasid_array); in iommu_get_domain_for_dev_pasid()
3617 domain = xa_load(&group->pasid_array, pasid); in iommu_get_domain_for_dev_pasid()
3620 xa_unlock(&group->pasid_array); in iommu_get_domain_for_dev_pasid()