Lines matching "group" in drivers/vfio/vfio.c (Linux kernel VFIO core)

97 	struct vfio_group		*group;  member
115 * removes the device from the dummy group and cannot be nested.
119 struct iommu_group *group; in vfio_iommu_group_get() local
122 group = iommu_group_get(dev); in vfio_iommu_group_get()
126 * With noiommu enabled, an IOMMU group will be created for a device in vfio_iommu_group_get()
131 if (group || !noiommu || iommu_present(dev->bus)) in vfio_iommu_group_get()
132 return group; in vfio_iommu_group_get()
134 group = iommu_group_alloc(); in vfio_iommu_group_get()
135 if (IS_ERR(group)) in vfio_iommu_group_get()
138 iommu_group_set_name(group, "vfio-noiommu"); in vfio_iommu_group_get()
139 iommu_group_set_iommudata(group, &noiommu, NULL); in vfio_iommu_group_get()
140 ret = iommu_group_add_device(group, dev); in vfio_iommu_group_get()
142 iommu_group_put(group); in vfio_iommu_group_get()
147 * Where to taint? At this point we've added an IOMMU group for a in vfio_iommu_group_get()
155 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); in vfio_iommu_group_get()
158 return group; in vfio_iommu_group_get()
162 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) in vfio_iommu_group_put() argument
165 if (iommu_group_get_iommudata(group) == &noiommu) in vfio_iommu_group_put()
169 iommu_group_put(group); in vfio_iommu_group_put()
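These two helpers are paired by vfio bus drivers around probe/remove. A minimal sketch of that pairing, assuming a vfio-pci-style PCI driver (pdev is hypothetical; only the two vfio_iommu_group_* calls are from this file):

	struct iommu_group *grp;

	/* returns the existing group, or creates a "vfio-noiommu" one */
	grp = vfio_iommu_group_get(&pdev->dev);
	if (!grp)
		return -EINVAL;

	/* ... register the vfio device ... */

	/* on teardown: puts the group and, if it was a noiommu group,
	 * removes the device from it again */
	vfio_iommu_group_put(grp, &pdev->dev);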
270 * Group minor allocation/free - both called with vfio.group_lock held
272 static int vfio_alloc_group_minor(struct vfio_group *group) in vfio_alloc_group_minor() argument
274 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); in vfio_alloc_group_minor()
284 static void vfio_group_get(struct vfio_group *group);
289 * it's freed via kref. Must support container/group/device being
310 static void vfio_group_unlock_and_free(struct vfio_group *group) in vfio_group_unlock_and_free() argument
315 * that the group is no longer in vfio.group_list. in vfio_group_unlock_and_free()
317 iommu_group_unregister_notifier(group->iommu_group, &group->nb); in vfio_group_unlock_and_free()
318 kfree(group); in vfio_group_unlock_and_free()
322 * Group objects - create, release, get, put, search
326 struct vfio_group *group, *tmp; in vfio_create_group() local
330 group = kzalloc(sizeof(*group), GFP_KERNEL); in vfio_create_group()
331 if (!group) in vfio_create_group()
334 kref_init(&group->kref); in vfio_create_group()
335 INIT_LIST_HEAD(&group->device_list); in vfio_create_group()
336 mutex_init(&group->device_lock); in vfio_create_group()
337 INIT_LIST_HEAD(&group->unbound_list); in vfio_create_group()
338 mutex_init(&group->unbound_lock); in vfio_create_group()
339 atomic_set(&group->container_users, 0); in vfio_create_group()
340 atomic_set(&group->opened, 0); in vfio_create_group()
341 init_waitqueue_head(&group->container_q); in vfio_create_group()
342 group->iommu_group = iommu_group; in vfio_create_group()
344 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); in vfio_create_group()
346 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_create_group()
348 group->nb.notifier_call = vfio_iommu_group_notifier; in vfio_create_group()
354 * do anything unless it can find the group in vfio.group_list, so in vfio_create_group()
357 ret = iommu_group_register_notifier(iommu_group, &group->nb); in vfio_create_group()
359 kfree(group); in vfio_create_group()
365 /* Did we race creating this group? */ in vfio_create_group()
369 vfio_group_unlock_and_free(group); in vfio_create_group()
374 minor = vfio_alloc_group_minor(group); in vfio_create_group()
376 vfio_group_unlock_and_free(group); in vfio_create_group()
382 group, "%s%d", group->noiommu ? "noiommu-" : "", in vfio_create_group()
386 vfio_group_unlock_and_free(group); in vfio_create_group()
390 group->minor = minor; in vfio_create_group()
391 group->dev = dev; in vfio_create_group()
393 list_add(&group->vfio_next, &vfio.group_list); in vfio_create_group()
397 return group; in vfio_create_group()
403 struct vfio_group *group = container_of(kref, struct vfio_group, kref); in vfio_group_release() local
405 struct iommu_group *iommu_group = group->iommu_group; in vfio_group_release()
407 WARN_ON(!list_empty(&group->device_list)); in vfio_group_release()
408 WARN_ON(group->notifier.head); in vfio_group_release()
411 &group->unbound_list, unbound_next) { in vfio_group_release()
416 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); in vfio_group_release()
417 list_del(&group->vfio_next); in vfio_group_release()
418 vfio_free_group_minor(group->minor); in vfio_group_release()
419 vfio_group_unlock_and_free(group); in vfio_group_release()
423 static void vfio_group_put(struct vfio_group *group) in vfio_group_put() argument
425 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); in vfio_group_put()
430 struct vfio_group *group; member
439 vfio_group_put(do_work->group); in vfio_group_put_bg()
443 static void vfio_group_schedule_put(struct vfio_group *group) in vfio_group_schedule_put() argument
452 do_work->group = group; in vfio_group_schedule_put()
456 /* Assume group_lock or group reference is held */
457 static void vfio_group_get(struct vfio_group *group) in vfio_group_get() argument
459 kref_get(&group->kref); in vfio_group_get()
464 * sure the group pointer is valid under lock and get a reference.
466 static struct vfio_group *vfio_group_try_get(struct vfio_group *group) in vfio_group_try_get() argument
468 struct vfio_group *target = group; in vfio_group_try_get()
471 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_try_get()
472 if (group == target) { in vfio_group_try_get()
473 vfio_group_get(group); in vfio_group_try_get()
475 return group; in vfio_group_try_get()
486 struct vfio_group *group; in vfio_group_get_from_iommu() local
489 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_get_from_iommu()
490 if (group->iommu_group == iommu_group) { in vfio_group_get_from_iommu()
491 vfio_group_get(group); in vfio_group_get_from_iommu()
493 return group; in vfio_group_get_from_iommu()
503 struct vfio_group *group; in vfio_group_get_from_minor() local
506 group = idr_find(&vfio.group_idr, minor); in vfio_group_get_from_minor()
507 if (!group) { in vfio_group_get_from_minor()
511 vfio_group_get(group); in vfio_group_get_from_minor()
514 return group; in vfio_group_get_from_minor()
520 struct vfio_group *group; in vfio_group_get_from_dev() local
526 group = vfio_group_get_from_iommu(iommu_group); in vfio_group_get_from_dev()
529 return group; in vfio_group_get_from_dev()
536 struct vfio_device *vfio_group_create_device(struct vfio_group *group, in vfio_group_create_device() argument
549 device->group = group; in vfio_group_create_device()
554 /* No need to get group_lock, caller has group reference */ in vfio_group_create_device()
555 vfio_group_get(group); in vfio_group_create_device()
557 mutex_lock(&group->device_lock); in vfio_group_create_device()
558 list_add(&device->group_next, &group->device_list); in vfio_group_create_device()
559 group->dev_counter++; in vfio_group_create_device()
560 mutex_unlock(&group->device_lock); in vfio_group_create_device()
569 struct vfio_group *group = device->group; in vfio_device_release() local
572 group->dev_counter--; in vfio_device_release()
573 mutex_unlock(&group->device_lock); in vfio_device_release()
583 /* Device reference always implies a group reference */
586 struct vfio_group *group = device->group; in vfio_device_put() local
587 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); in vfio_device_put()
588 vfio_group_put(group); in vfio_device_put()
594 vfio_group_get(device->group); in vfio_device_get()
598 static struct vfio_device *vfio_group_get_device(struct vfio_group *group, in vfio_group_get_device() argument
603 mutex_lock(&group->device_lock); in vfio_group_get_device()
604 list_for_each_entry(device, &group->device_list, group_next) { in vfio_group_get_device()
607 mutex_unlock(&group->device_lock); in vfio_group_get_device()
611 mutex_unlock(&group->device_lock); in vfio_group_get_device()
618 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
624 * then all of the downstream devices will be part of the same IOMMU group as
648 * A vfio group is viable for use by userspace if all devices are in
657 * group. The second is to test if the device exists on the group
663 struct vfio_group *group = data; in vfio_dev_viable() local
669 mutex_lock(&group->unbound_lock); in vfio_dev_viable()
670 list_for_each_entry(unbound, &group->unbound_list, unbound_next) { in vfio_dev_viable()
676 mutex_unlock(&group->unbound_lock); in vfio_dev_viable()
681 device = vfio_group_get_device(group, dev); in vfio_dev_viable()
693 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) in vfio_group_nb_add_dev() argument
698 device = vfio_group_get_device(group, dev); in vfio_group_nb_add_dev()
705 if (!atomic_read(&group->container_users)) in vfio_group_nb_add_dev()
709 dev_WARN(dev, "Device added to live group %d!\n", in vfio_group_nb_add_dev()
710 iommu_group_id(group->iommu_group)); in vfio_group_nb_add_dev()
715 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) in vfio_group_nb_verify() argument
717 /* We don't care what happens when the group isn't in use */ in vfio_group_nb_verify()
718 if (!atomic_read(&group->container_users)) in vfio_group_nb_verify()
721 return vfio_dev_viable(dev, group); in vfio_group_nb_verify()
727 struct vfio_group *group = container_of(nb, struct vfio_group, nb); in vfio_iommu_group_notifier() local
733 * risk racing a group being removed. Ignore spurious notifies. in vfio_iommu_group_notifier()
735 group = vfio_group_try_get(group); in vfio_iommu_group_notifier()
736 if (!group) in vfio_iommu_group_notifier()
741 vfio_group_nb_add_dev(group, dev); in vfio_iommu_group_notifier()
753 dev_dbg(dev, "%s: group %d binding to driver\n", __func__, in vfio_iommu_group_notifier()
754 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
757 dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__, in vfio_iommu_group_notifier()
758 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
759 BUG_ON(vfio_group_nb_verify(group, dev)); in vfio_iommu_group_notifier()
762 dev_dbg(dev, "%s: group %d unbinding from driver %s\n", in vfio_iommu_group_notifier()
763 __func__, iommu_group_id(group->iommu_group), in vfio_iommu_group_notifier()
767 dev_dbg(dev, "%s: group %d unbound from driver\n", __func__, in vfio_iommu_group_notifier()
768 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
770 * XXX An unbound device in a live group is ok, but we'd in vfio_iommu_group_notifier()
777 mutex_lock(&group->unbound_lock); in vfio_iommu_group_notifier()
779 &group->unbound_list, unbound_next) { in vfio_iommu_group_notifier()
786 mutex_unlock(&group->unbound_lock); in vfio_iommu_group_notifier()
791 * If we're the last reference to the group, the group will be in vfio_iommu_group_notifier()
792 * released, which includes unregistering the iommu group notifier. in vfio_iommu_group_notifier()
797 vfio_group_schedule_put(group); in vfio_iommu_group_notifier()
808 struct vfio_group *group; in vfio_add_group_dev() local
815 group = vfio_group_get_from_iommu(iommu_group); in vfio_add_group_dev()
816 if (!group) { in vfio_add_group_dev()
817 group = vfio_create_group(iommu_group); in vfio_add_group_dev()
818 if (IS_ERR(group)) { in vfio_add_group_dev()
820 return PTR_ERR(group); in vfio_add_group_dev()
830 device = vfio_group_get_device(group, dev); in vfio_add_group_dev()
832 dev_WARN(dev, "Device already exists on group %d\n", in vfio_add_group_dev()
835 vfio_group_put(group); in vfio_add_group_dev()
839 device = vfio_group_create_device(group, dev, ops, device_data); in vfio_add_group_dev()
841 vfio_group_put(group); in vfio_add_group_dev()
850 vfio_group_put(group); in vfio_add_group_dev()
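vfio_add_group_dev() is the registration entry point for vfio bus drivers in this era of the API: it finds or creates the vfio_group for the device's iommu_group, then hangs a vfio_device with the given ops off it. A hedged sketch of a driver probe path (my_probe, my_vfio_ops, and struct my_device are hypothetical):

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct my_device *vdev;

		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev)
			return -ENOMEM;

		/* takes an iommu_group reference, creates the group char
		 * device if needed, and stores vdev as device_data */
		return vfio_add_group_dev(&pdev->dev, &my_vfio_ops, vdev);
	}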
865 struct vfio_group *group; in vfio_device_get_from_dev() local
868 group = vfio_group_get_from_dev(dev); in vfio_device_get_from_dev()
869 if (!group) in vfio_device_get_from_dev()
872 device = vfio_group_get_device(group, dev); in vfio_device_get_from_dev()
873 vfio_group_put(group); in vfio_device_get_from_dev()
879 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, in vfio_device_get_from_name() argument
884 mutex_lock(&group->device_lock); in vfio_device_get_from_name()
885 list_for_each_entry(it, &group->device_list, group_next) { in vfio_device_get_from_name()
904 mutex_unlock(&group->device_lock); in vfio_device_get_from_name()
925 struct vfio_group *group = device->group; in vfio_del_group_dev() local
932 * The group exists so long as we have a device reference. Get in vfio_del_group_dev()
933 * a group reference and use it to scan for the device going away. in vfio_del_group_dev()
935 vfio_group_get(group); in vfio_del_group_dev()
938 * When the device is removed from the group, the group suddenly in vfio_del_group_dev()
940 * completes), but it's not present in the group. This is bad news in vfio_del_group_dev()
941 * for any external users that need to re-acquire a group reference in vfio_del_group_dev()
949 mutex_lock(&group->unbound_lock); in vfio_del_group_dev()
950 list_add(&unbound->unbound_next, &group->unbound_list); in vfio_del_group_dev()
951 mutex_unlock(&group->unbound_lock); in vfio_del_group_dev()
958 * If the device is still present in the group after the above in vfio_del_group_dev()
968 device = vfio_group_get_device(group, dev); in vfio_del_group_dev()
995 * In order to support multiple devices per group, devices can be in vfio_del_group_dev()
996 * plucked from the group while other devices in the group are still in vfio_del_group_dev()
997 * in use. The container persists with this group and those remaining in vfio_del_group_dev()
999 * by binding this device to another driver while the group is still in in vfio_del_group_dev()
1001 * or potentially the only, device in the group there can be no other in vfio_del_group_dev()
1002 * in-use devices in the group. The user has done their due diligence in vfio_del_group_dev()
1004 * we need to make sure the group is detached from the container. in vfio_del_group_dev()
1008 if (list_empty(&group->device_list)) in vfio_del_group_dev()
1009 wait_event(group->container_q, !group->container); in vfio_del_group_dev()
1011 vfio_group_put(group); in vfio_del_group_dev()
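The matching teardown call returns the device_data pointer handed in at registration and, per the comment above, waits on group->container_q until userspace has released the group if this was its last device. A sketch (my_remove and struct my_device are hypothetical):

	static void my_remove(struct pci_dev *pdev)
	{
		/* may block until the group is detached from its container */
		struct my_device *vdev = vfio_del_group_dev(&pdev->dev);

		kfree(vdev);
	}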
1077 struct vfio_group *group; in __vfio_container_attach_groups() local
1080 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
1081 ret = driver->ops->attach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1089 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
1091 driver->ops->detach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1107 * the group can be assigned to specific users. Therefore, only by in vfio_ioctl_set_iommu()
1108 * adding a group to a container does the user get the privilege of in vfio_ioctl_set_iommu()
1290 * VFIO Group fd, /dev/vfio/$GROUP
1292 static void __vfio_group_unset_container(struct vfio_group *group) in __vfio_group_unset_container() argument
1294 struct vfio_container *container = group->container; in __vfio_group_unset_container()
1302 group->iommu_group); in __vfio_group_unset_container()
1304 group->container = NULL; in __vfio_group_unset_container()
1305 wake_up(&group->container_q); in __vfio_group_unset_container()
1306 list_del(&group->container_next); in __vfio_group_unset_container()
1308 /* Detaching the last group deprivileges a container, remove iommu */ in __vfio_group_unset_container()
1324 	 * the group, which we know still exists, therefore the only valid
1327 static int vfio_group_unset_container(struct vfio_group *group) in vfio_group_unset_container() argument
1329 int users = atomic_cmpxchg(&group->container_users, 1, 0); in vfio_group_unset_container()
1336 __vfio_group_unset_container(group); in vfio_group_unset_container()
1343 * implicitly removes the group from the container. That is, if the
1344 * group file descriptor is closed, as well as any device file descriptors,
1345 * the group is free.
1347 static void vfio_group_try_dissolve_container(struct vfio_group *group) in vfio_group_try_dissolve_container() argument
1349 if (0 == atomic_dec_if_positive(&group->container_users)) in vfio_group_try_dissolve_container()
1350 __vfio_group_unset_container(group); in vfio_group_try_dissolve_container()
1353 static int vfio_group_set_container(struct vfio_group *group, int container_fd) in vfio_group_set_container() argument
1360 if (atomic_read(&group->container_users)) in vfio_group_set_container()
1363 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_set_container()
1383 container->noiommu != group->noiommu) { in vfio_group_set_container()
1391 group->iommu_group); in vfio_group_set_container()
1396 group->container = container; in vfio_group_set_container()
1397 container->noiommu = group->noiommu; in vfio_group_set_container()
1398 list_add(&group->container_next, &container->group_list); in vfio_group_set_container()
1400 /* Get a reference on the container and mark a user within the group */ in vfio_group_set_container()
1402 atomic_inc(&group->container_users); in vfio_group_set_container()
1410 static bool vfio_group_viable(struct vfio_group *group) in vfio_group_viable() argument
1412 return (iommu_group_for_each_dev(group->iommu_group, in vfio_group_viable()
1413 group, vfio_dev_viable) == 0); in vfio_group_viable()
1416 static int vfio_group_add_container_user(struct vfio_group *group) in vfio_group_add_container_user() argument
1418 if (!atomic_inc_not_zero(&group->container_users)) in vfio_group_add_container_user()
1421 if (group->noiommu) { in vfio_group_add_container_user()
1422 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1425 if (!group->container->iommu_driver || !vfio_group_viable(group)) { in vfio_group_add_container_user()
1426 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1435 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) in vfio_group_get_device_fd() argument
1441 if (0 == atomic_read(&group->container_users) || in vfio_group_get_device_fd()
1442 !group->container->iommu_driver || !vfio_group_viable(group)) in vfio_group_get_device_fd()
1445 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_get_device_fd()
1448 device = vfio_device_get_from_name(group, buf); in vfio_group_get_device_fd()
1486 atomic_inc(&group->container_users); in vfio_group_get_device_fd()
1490 if (group->noiommu) in vfio_group_get_device_fd()
1500 struct vfio_group *group = filep->private_data; in vfio_group_fops_unl_ioctl() local
1519 if (vfio_group_viable(group)) in vfio_group_fops_unl_ioctl()
1522 if (group->container) in vfio_group_fops_unl_ioctl()
1541 ret = vfio_group_set_container(group, fd); in vfio_group_fops_unl_ioctl()
1545 ret = vfio_group_unset_container(group); in vfio_group_fops_unl_ioctl()
1555 ret = vfio_group_get_device_fd(group, buf); in vfio_group_fops_unl_ioctl()
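Userspace drives these group ioctls in a fixed order, as documented in Documentation/driver-api/vfio.rst; a condensed sketch (the group number and device name are examples):

	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/26", O_RDWR);
	struct vfio_group_status status = { .argsz = sizeof(status) };

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);       /* check VIABLE flag */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container); /* privileges the group */
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); /* pick an IOMMU backend */
	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");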
1566 struct vfio_group *group; in vfio_group_fops_open() local
1569 group = vfio_group_get_from_minor(iminor(inode)); in vfio_group_fops_open()
1570 if (!group) in vfio_group_fops_open()
1573 if (group->noiommu && !capable(CAP_SYS_RAWIO)) { in vfio_group_fops_open()
1574 vfio_group_put(group); in vfio_group_fops_open()
1578 /* Do we need multiple instances of the group open? Seems not. */ in vfio_group_fops_open()
1579 opened = atomic_cmpxchg(&group->opened, 0, 1); in vfio_group_fops_open()
1581 vfio_group_put(group); in vfio_group_fops_open()
1586 if (group->container) { in vfio_group_fops_open()
1587 atomic_dec(&group->opened); in vfio_group_fops_open()
1588 vfio_group_put(group); in vfio_group_fops_open()
1593 if (WARN_ON(group->notifier.head)) in vfio_group_fops_open()
1594 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_group_fops_open()
1596 filep->private_data = group; in vfio_group_fops_open()
1603 struct vfio_group *group = filep->private_data; in vfio_group_fops_release() local
1607 vfio_group_try_dissolve_container(group); in vfio_group_fops_release()
1609 atomic_dec(&group->opened); in vfio_group_fops_release()
1611 vfio_group_put(group); in vfio_group_fops_release()
1633 vfio_group_try_dissolve_container(device->group); in vfio_device_fops_release()
1700 * - attaching group(s) to it;
1705 * 2. User space passes a group fd to an external user.
1708 * - the group is initialized;
1712 * the VFIO group from disposal before KVM exits.
1718 * vfio_group_put_external_user() to release the VFIO group.
1723 struct vfio_group *group = filep->private_data; in vfio_group_get_external_user() local
1729 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user()
1733 vfio_group_get(group); in vfio_group_get_external_user()
1735 return group; in vfio_group_get_external_user()
1743  * - A VFIO group is associated with the device; in vfio_group_get_external_user_from_dev()
1744 * - IOMMU is set for the group.
1746 * increments the container user counter to prevent the VFIO group
1748 * to the VFIO group.
1750 * When the external user finishes using the VFIO group, it calls
1751 * vfio_group_put_external_user() to release the VFIO group and
1755  * Returns an ERR_PTR on failure or a pointer to the VFIO group. in vfio_group_get_external_user_from_dev()
1760 struct vfio_group *group; in vfio_group_get_external_user_from_dev() local
1763 group = vfio_group_get_from_dev(dev); in vfio_group_get_external_user_from_dev()
1764 if (!group) in vfio_group_get_external_user_from_dev()
1767 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user_from_dev()
1769 vfio_group_put(group); in vfio_group_get_external_user_from_dev()
1773 return group; in vfio_group_get_external_user_from_dev()
1777 void vfio_group_put_external_user(struct vfio_group *group) in vfio_group_put_external_user() argument
1779 vfio_group_try_dissolve_container(group); in vfio_group_put_external_user()
1780 vfio_group_put(group); in vfio_group_put_external_user()
1787 struct vfio_group *group = filep->private_data; in vfio_external_group_match_file() local
1789 return (filep->f_op == &vfio_group_fops) && (group == test_group); in vfio_external_group_match_file()
1793 int vfio_external_user_iommu_id(struct vfio_group *group) in vfio_external_user_iommu_id() argument
1795 return iommu_group_id(group->iommu_group); in vfio_external_user_iommu_id()
1799 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) in vfio_external_check_extension() argument
1801 return vfio_ioctl_check_extension(group->container, arg); in vfio_external_check_extension()
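KVM is the canonical consumer of this external-user interface; a minimal sketch of the acquire/query/release pattern the comments above describe (filep is the struct file of an open group fd):

	struct vfio_group *grp;
	int id;

	grp = vfio_group_get_external_user(filep);
	if (IS_ERR(grp))
		return PTR_ERR(grp);

	id = vfio_external_user_iommu_id(grp);	/* e.g. for KVM-VFIO bookkeeping */

	/* ... */
	vfio_group_put_external_user(grp);	/* drops the container user and group ref */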
1938 struct vfio_group *group; in vfio_pin_pages() local
1948 group = vfio_group_get_from_dev(dev); in vfio_pin_pages()
1949 if (!group) in vfio_pin_pages()
1952 if (group->dev_counter > 1) { in vfio_pin_pages()
1957 ret = vfio_group_add_container_user(group); in vfio_pin_pages()
1961 container = group->container; in vfio_pin_pages()
1965 group->iommu_group, user_pfn, in vfio_pin_pages()
1970 vfio_group_try_dissolve_container(group); in vfio_pin_pages()
1973 vfio_group_put(group); in vfio_pin_pages()
1990 struct vfio_group *group; in vfio_unpin_pages() local
2000 group = vfio_group_get_from_dev(dev); in vfio_unpin_pages()
2001 if (!group) in vfio_unpin_pages()
2004 ret = vfio_group_add_container_user(group); in vfio_unpin_pages()
2008 container = group->container; in vfio_unpin_pages()
2016 vfio_group_try_dissolve_container(group); in vfio_unpin_pages()
2019 vfio_group_put(group); in vfio_unpin_pages()
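These two exports are what mdev vendor drivers use to pin and unpin guest IOVA PFNs through the device's container. A sketch against the signatures used here (dev, gfn, and the single-page arrays are hypothetical):

	unsigned long iova_pfn[1] = { gfn };	/* IOVA page frame to pin */
	unsigned long phys_pfn[1];		/* filled with the host PFN */
	int ret;

	/* returns the number of pages pinned, or a negative errno */
	ret = vfio_pin_pages(dev, iova_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, phys_pfn);
	if (ret < 0)
		return ret;
	/* ... access the page at phys_pfn[0] ... */
	vfio_unpin_pages(dev, iova_pfn, 1);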
2026 * VFIO group.
2030  * so as to prevent the VFIO group from being freed in the middle of the call.
2031 * But it can keep the reference to the VFIO group for several calls into
2033  * After it has finished using the VFIO group, the caller needs to release the
2034 * VFIO group by calling vfio_group_put_external_user().
2036 * @group [in] : VFIO group
2045 int vfio_group_pin_pages(struct vfio_group *group, in vfio_group_pin_pages() argument
2053 if (!group || !user_iova_pfn || !phys_pfn || !npage) in vfio_group_pin_pages()
2056 if (group->dev_counter > 1) in vfio_group_pin_pages()
2062 container = group->container; in vfio_group_pin_pages()
2066 group->iommu_group, user_iova_pfn, in vfio_group_pin_pages()
2076 * Unpin a set of guest IOVA PFNs for a VFIO group.
2080  * so as to prevent the VFIO group from being freed in the middle of the call.
2081 * But it can keep the reference to the VFIO group for several calls into
2083  * After it has finished using the VFIO group, the caller needs to release the
2084 * VFIO group by calling vfio_group_put_external_user().
2086  * @group [in] : VFIO group
2093 int vfio_group_unpin_pages(struct vfio_group *group, in vfio_group_unpin_pages() argument
2100 if (!group || !user_iova_pfn || !npage) in vfio_group_unpin_pages()
2106 container = group->container; in vfio_group_unpin_pages()
2131  * so as to prevent the VFIO group from being freed in the middle of the call.
2132 * But it can keep the reference to the VFIO group for several calls into
2134  * After it has finished using the VFIO group, the caller needs to release the
2135 * VFIO group by calling vfio_group_put_external_user().
2137 * @group [in] : VFIO group
2144 int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, in vfio_dma_rw() argument
2151 if (!group || !data || len <= 0) in vfio_dma_rw()
2154 container = group->container; in vfio_dma_rw()
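vfio_dma_rw() lets a kernel consumer (again, typically an mdev driver) copy to or from guest memory through the container's IOVA mappings instead of pinning pages itself. A sketch (grp, iova, buf, and len are hypothetical; the final bool selects write-to-guest):

	/* read len bytes of guest memory at iova into buf */
	ret = vfio_dma_rw(grp, iova, buf, len, false);

	/* write buf back out to the same guest IOVA */
	ret = vfio_dma_rw(grp, iova, buf, len, true);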
2167 static int vfio_register_iommu_notifier(struct vfio_group *group, in vfio_register_iommu_notifier() argument
2175 ret = vfio_group_add_container_user(group); in vfio_register_iommu_notifier()
2179 container = group->container; in vfio_register_iommu_notifier()
2187 vfio_group_try_dissolve_container(group); in vfio_register_iommu_notifier()
2192 static int vfio_unregister_iommu_notifier(struct vfio_group *group, in vfio_unregister_iommu_notifier() argument
2199 ret = vfio_group_add_container_user(group); in vfio_unregister_iommu_notifier()
2203 container = group->container; in vfio_unregister_iommu_notifier()
2211 vfio_group_try_dissolve_container(group); in vfio_unregister_iommu_notifier()
2216 void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) in vfio_group_set_kvm() argument
2218 group->kvm = kvm; in vfio_group_set_kvm()
2219 blocking_notifier_call_chain(&group->notifier, in vfio_group_set_kvm()
2224 static int vfio_register_group_notifier(struct vfio_group *group, in vfio_register_group_notifier() argument
2241 ret = vfio_group_add_container_user(group); in vfio_register_group_notifier()
2245 ret = blocking_notifier_chain_register(&group->notifier, nb); in vfio_register_group_notifier()
2251 if (!ret && set_kvm && group->kvm) in vfio_register_group_notifier()
2252 blocking_notifier_call_chain(&group->notifier, in vfio_register_group_notifier()
2253 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); in vfio_register_group_notifier()
2255 vfio_group_try_dissolve_container(group); in vfio_register_group_notifier()
2260 static int vfio_unregister_group_notifier(struct vfio_group *group, in vfio_unregister_group_notifier() argument
2265 ret = vfio_group_add_container_user(group); in vfio_unregister_group_notifier()
2269 ret = blocking_notifier_chain_unregister(&group->notifier, nb); in vfio_unregister_group_notifier()
2271 vfio_group_try_dissolve_container(group); in vfio_unregister_group_notifier()
2279 struct vfio_group *group; in vfio_register_notifier() local
2285 group = vfio_group_get_from_dev(dev); in vfio_register_notifier()
2286 if (!group) in vfio_register_notifier()
2291 ret = vfio_register_iommu_notifier(group, events, nb); in vfio_register_notifier()
2294 ret = vfio_register_group_notifier(group, events, nb); in vfio_register_notifier()
2300 vfio_group_put(group); in vfio_register_notifier()
2308 struct vfio_group *group; in vfio_unregister_notifier() local
2314 group = vfio_group_get_from_dev(dev); in vfio_unregister_notifier()
2315 if (!group) in vfio_unregister_notifier()
2320 ret = vfio_unregister_iommu_notifier(group, nb); in vfio_unregister_notifier()
2323 ret = vfio_unregister_group_notifier(group, nb); in vfio_unregister_notifier()
2329 vfio_group_put(group); in vfio_unregister_notifier()
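Both notifier flavors funnel through the vfio_register_notifier()/vfio_unregister_notifier() pair above, selected by type. A sketch registering for DMA unmap events (my_nb and my_notify are hypothetical):

	struct notifier_block my_nb = { .notifier_call = my_notify };
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	ret = vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, &my_nb);
	/* ... my_notify() now runs on each DMA unmap in the container ... */
	vfio_unregister_notifier(dev, VFIO_IOMMU_NOTIFY, &my_nb);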
2367 /* /dev/vfio/$GROUP */ in vfio_init()