Lines matching "iommu" in drivers/vfio/vfio_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
18 #include <linux/iommu.h>
44 #define DRIVER_DESC "VFIO - User Level meta-driver"
55 MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no …
67 return -EINVAL; in vfio_assign_device_set()
80 return -ENOMEM; in vfio_assign_device_set()
81 mutex_init(&new_dev_set->lock); in vfio_assign_device_set()
82 INIT_LIST_HEAD(&new_dev_set->device_list); in vfio_assign_device_set()
83 new_dev_set->set_id = set_id; in vfio_assign_device_set()
100 dev_set->device_count++; in vfio_assign_device_set()
102 mutex_lock(&dev_set->lock); in vfio_assign_device_set()
103 device->dev_set = dev_set; in vfio_assign_device_set()
104 list_add_tail(&device->dev_set_list, &dev_set->device_list); in vfio_assign_device_set()
105 mutex_unlock(&dev_set->lock); in vfio_assign_device_set()
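
The vfio_assign_device_set() fragments above implement shared reset groups: every vfio_device registered with the same set_id pointer lands in one vfio_device_set, protected by a common lock. A minimal driver-side sketch; the my_* names are hypothetical, while vfio_assign_device_set() is the real exported API (keying the set on the PCI slot mirrors what vfio-pci does):

#include <linux/pci.h>
#include <linux/vfio.h>

/*
 * Hypothetical .init callback: devices behind the same PCI slot share
 * a reset, so use the slot pointer as the set_id. Devices passing the
 * same pointer share one vfio_device_set.
 */
static int my_vfio_init(struct vfio_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->dev);

	if (pdev->slot)
		return vfio_assign_device_set(vdev, pdev->slot);

	/* No shared reset: fall back to a singleton set keyed on the device */
	return vfio_assign_device_set(vdev, vdev);
}
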
112 struct vfio_device_set *dev_set = device->dev_set; in vfio_release_device_set()
117 mutex_lock(&dev_set->lock); in vfio_release_device_set()
118 list_del(&device->dev_set_list); in vfio_release_device_set()
119 mutex_unlock(&dev_set->lock); in vfio_release_device_set()
122 if (!--dev_set->device_count) { in vfio_release_device_set()
124 (unsigned long)dev_set->set_id); in vfio_release_device_set()
125 mutex_destroy(&dev_set->lock); in vfio_release_device_set()
136 lockdep_assert_held(&dev_set->lock); in vfio_device_set_open_count()
138 list_for_each_entry(cur, &dev_set->device_list, dev_set_list) in vfio_device_set_open_count()
139 open_count += cur->open_count; in vfio_device_set_open_count()
150 lockdep_assert_held(&dev_set->lock); in vfio_find_device_in_devset()
152 list_for_each_entry(cur, &dev_set->device_list, dev_set_list) in vfio_find_device_in_devset()
153 if (cur->dev == dev) in vfio_find_device_in_devset()
160 * Device objects - create, release, get, put, search
165 if (refcount_dec_and_test(&device->refcount)) in vfio_device_put_registration()
166 complete(&device->comp); in vfio_device_put_registration()
171 return refcount_inc_not_zero(&device->refcount); in vfio_device_try_get_registration()
184 ida_free(&vfio.device_ida, device->index); in vfio_device_release()
186 if (device->ops->release) in vfio_device_release()
187 device->ops->release(device); in vfio_device_release()
214 return ERR_PTR(-EINVAL); in _vfio_alloc_device()
218 return ERR_PTR(-ENOMEM); in _vfio_alloc_device()
245 device->index = ret; in vfio_init_device()
246 init_completion(&device->comp); in vfio_init_device()
247 device->dev = dev; in vfio_init_device()
248 device->ops = ops; in vfio_init_device()
250 if (ops->init) { in vfio_init_device()
251 ret = ops->init(device); in vfio_init_device()
256 device_initialize(&device->device); in vfio_init_device()
257 device->device.release = vfio_device_release; in vfio_init_device()
258 device->device.class = vfio.device_class; in vfio_init_device()
259 device->device.parent = device->dev; in vfio_init_device()
264 ida_free(&vfio.device_ida, device->index); in vfio_init_device()
274 (!device->ops->bind_iommufd || in __vfio_register_dev()
275 !device->ops->unbind_iommufd || in __vfio_register_dev()
276 !device->ops->attach_ioas || in __vfio_register_dev()
277 !device->ops->detach_ioas))) in __vfio_register_dev()
278 return -EINVAL; in __vfio_register_dev()
284 if (!device->dev_set) in __vfio_register_dev()
287 ret = dev_set_name(&device->device, "vfio%d", device->index); in __vfio_register_dev()
 296                 * VFIO always sets IOMMU_CACHE because we offer no way for userspace to in __vfio_register_dev()
 297                 * restore cache coherency. It has to be checked here because it is only in __vfio_register_dev()
 298                 * valid for cases where we are using iommu groups. in __vfio_register_dev()
301 !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) { in __vfio_register_dev()
302 ret = -EINVAL; in __vfio_register_dev()
311 refcount_set(&device->refcount, 1); in __vfio_register_dev()
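
The checks above reject physical devices whose ops lack the iommufd callbacks. A sketch of the matching driver-side registration flow, reusing the init callback sketched earlier; the my_* names are hypothetical, while vfio_alloc_device(), vfio_register_group_dev(), vfio_put_device() and the vfio_iommufd_physical_* helpers are real exported APIs:

struct my_device {
	struct vfio_device vdev;	/* must embed struct vfio_device */
	/* driver-private state ... */
};

static const struct vfio_device_ops my_ops = {
	.name		= "my-vfio-driver",
	.init		= my_vfio_init,
	/* Required at registration time for physical (IOMMU-backed) devices */
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
	.detach_ioas	= vfio_iommufd_physical_detach_ioas,
};

static int my_probe(struct device *dev)
{
	struct my_device *md;
	int ret;

	/* Runs vfio_init_device(), which invokes my_ops.init above */
	md = vfio_alloc_device(my_device, vdev, dev, &my_ops);
	if (IS_ERR(md))
		return PTR_ERR(md);

	ret = vfio_register_group_dev(&md->vdev);
	if (ret)
		vfio_put_device(&md->vdev);
	return ret;
}
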
 329  * Register a virtual device without IOMMU backing. The user of this
 330  * device must not be able to directly trigger unmediated DMA.
360 rc = try_wait_for_completion(&device->comp); in vfio_unregister_group_dev()
362 if (device->ops->request) in vfio_unregister_group_dev()
363 device->ops->request(device, i++); in vfio_unregister_group_dev()
366 rc = wait_for_completion_timeout(&device->comp, in vfio_unregister_group_dev()
370 &device->comp, HZ * 10); in vfio_unregister_group_dev()
373 dev_warn(device->dev, in vfio_unregister_group_dev()
377 current->comm, task_pid_nr(current)); in vfio_unregister_group_dev()
395 lockdep_assert_held(&device->dev_set->lock); in vfio_device_get_kvm_safe()
417 device->put_kvm = pfn; in vfio_device_get_kvm_safe()
418 device->kvm = kvm; in vfio_device_get_kvm_safe()
423 lockdep_assert_held(&device->dev_set->lock); in vfio_device_put_kvm()
425 if (!device->kvm) in vfio_device_put_kvm()
428 if (WARN_ON(!device->put_kvm)) in vfio_device_put_kvm()
431 device->put_kvm(device->kvm); in vfio_device_put_kvm()
432 device->put_kvm = NULL; in vfio_device_put_kvm()
436 device->kvm = NULL; in vfio_device_put_kvm()
443 return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); in vfio_assert_device_open()
453 return ERR_PTR(-ENOMEM); in vfio_allocate_device_file()
455 df->device = device; in vfio_allocate_device_file()
456 spin_lock_init(&df->kvm_ref_lock); in vfio_allocate_device_file()
463 struct vfio_device *device = df->device; in vfio_df_device_first_open()
464 struct iommufd_ctx *iommufd = df->iommufd; in vfio_df_device_first_open()
467 lockdep_assert_held(&device->dev_set->lock); in vfio_df_device_first_open()
469 if (!try_module_get(device->dev->driver->owner)) in vfio_df_device_first_open()
470 return -ENODEV; in vfio_df_device_first_open()
479 if (device->ops->open_device) { in vfio_df_device_first_open()
480 ret = device->ops->open_device(device); in vfio_df_device_first_open()
492 module_put(device->dev->driver->owner); in vfio_df_device_first_open()
498 struct vfio_device *device = df->device; in vfio_df_device_last_close()
499 struct iommufd_ctx *iommufd = df->iommufd; in vfio_df_device_last_close()
501 lockdep_assert_held(&device->dev_set->lock); in vfio_df_device_last_close()
503 if (device->ops->close_device) in vfio_df_device_last_close()
504 device->ops->close_device(device); in vfio_df_device_last_close()
509 module_put(device->dev->driver->owner); in vfio_df_device_last_close()
514 struct vfio_device *device = df->device; in vfio_df_open()
517 lockdep_assert_held(&device->dev_set->lock); in vfio_df_open()
 520         * Only the group path allows the device to be opened multiple in vfio_df_open()
 521         * times. The device cdev path doesn't have a secure way for it. in vfio_df_open()
523 if (device->open_count != 0 && !df->group) in vfio_df_open()
524 return -EINVAL; in vfio_df_open()
526 device->open_count++; in vfio_df_open()
527 if (device->open_count == 1) { in vfio_df_open()
530 device->open_count--; in vfio_df_open()
538 struct vfio_device *device = df->device; in vfio_df_close()
540 lockdep_assert_held(&device->dev_set->lock); in vfio_df_close()
543 if (device->open_count == 1) in vfio_df_close()
545 device->open_count--; in vfio_df_close()
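
For context, a userspace sketch of the two paths that reach vfio_df_open(). The legacy group path below may open the device fd multiple times; the cdev path (opening /dev/vfio/devices/vfioX directly and binding it to an iommufd via VFIO_DEVICE_BIND_IOMMUFD) allows only a single open, as the open_count check above enforces. Error handling is trimmed and the group number is illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int open_via_group(const char *bdf)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/26", O_RDWR);	/* group number varies */

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);

	/* Returns a device fd backed by vfio_device_fops */
	return ioctl(group, VFIO_GROUP_GET_DEVICE_FD, bdf);
}
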
554 struct device *dev = device->dev; in vfio_device_pm_runtime_get()
556 if (dev->driver && dev->driver->pm) { in vfio_device_pm_runtime_get()
563 return -EIO; in vfio_device_pm_runtime_get()
575 struct device *dev = device->dev; in vfio_device_pm_runtime_put()
577 if (dev->driver && dev->driver->pm) in vfio_device_pm_runtime_put()
586 struct vfio_device_file *df = filep->private_data; in vfio_device_fops_release()
587 struct vfio_device *device = df->device; in vfio_device_fops_release()
589 if (df->group) in vfio_device_fops_release()
602 * vfio_mig_get_next_state - Compute the next step in the FSM
603 * @cur_fsm - The current state the device is in
604 * @new_fsm - The target state to reach
605 * @next_fsm - Pointer to the next step to get to new_fsm
607 * Return 0 upon success, otherwise -errno
625 * RESUMING -> STOP in vfio_mig_get_next_state()
626 * STOP -> RESUMING in vfio_mig_get_next_state()
627 * STOP -> STOP_COPY in vfio_mig_get_next_state()
628 * STOP_COPY -> STOP in vfio_mig_get_next_state()
632 * RUNNING -> RUNNING_P2P in vfio_mig_get_next_state()
633 * RUNNING_P2P -> RUNNING in vfio_mig_get_next_state()
634 * RUNNING_P2P -> STOP in vfio_mig_get_next_state()
635 * STOP -> RUNNING_P2P in vfio_mig_get_next_state()
639 * RUNNING -> PRE_COPY in vfio_mig_get_next_state()
640 * PRE_COPY -> RUNNING in vfio_mig_get_next_state()
641 * PRE_COPY -> STOP_COPY in vfio_mig_get_next_state()
644 * PRE_COPY -> RUNNING in vfio_mig_get_next_state()
645 * PRE_COPY -> PRE_COPY_P2P in vfio_mig_get_next_state()
646 * PRE_COPY_P2P -> PRE_COPY in vfio_mig_get_next_state()
647 * PRE_COPY_P2P -> RUNNING_P2P in vfio_mig_get_next_state()
648 * PRE_COPY_P2P -> STOP_COPY in vfio_mig_get_next_state()
649 * RUNNING -> PRE_COPY in vfio_mig_get_next_state()
650 * RUNNING_P2P -> PRE_COPY_P2P in vfio_mig_get_next_state()
653 * RUNNING -> STOP in vfio_mig_get_next_state()
654 * STOP -> RUNNING in vfio_mig_get_next_state()
659 * PRE_COPY -> PRE_COPY_P2P -> STOP_COPY in vfio_mig_get_next_state()
660 * PRE_COPY -> RUNNING -> RUNNING_P2P in vfio_mig_get_next_state()
661 * PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP in vfio_mig_get_next_state()
662 * PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING in vfio_mig_get_next_state()
663 * PRE_COPY_P2P -> RUNNING_P2P -> RUNNING in vfio_mig_get_next_state()
664 * PRE_COPY_P2P -> RUNNING_P2P -> STOP in vfio_mig_get_next_state()
665 * PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING in vfio_mig_get_next_state()
666 * RESUMING -> STOP -> RUNNING_P2P in vfio_mig_get_next_state()
667 * RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P in vfio_mig_get_next_state()
668 * RESUMING -> STOP -> RUNNING_P2P -> RUNNING in vfio_mig_get_next_state()
669 * RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY in vfio_mig_get_next_state()
670 * RESUMING -> STOP -> STOP_COPY in vfio_mig_get_next_state()
671 * RUNNING -> RUNNING_P2P -> PRE_COPY_P2P in vfio_mig_get_next_state()
672 * RUNNING -> RUNNING_P2P -> STOP in vfio_mig_get_next_state()
673 * RUNNING -> RUNNING_P2P -> STOP -> RESUMING in vfio_mig_get_next_state()
674 * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY in vfio_mig_get_next_state()
675 * RUNNING_P2P -> RUNNING -> PRE_COPY in vfio_mig_get_next_state()
676 * RUNNING_P2P -> STOP -> RESUMING in vfio_mig_get_next_state()
677 * RUNNING_P2P -> STOP -> STOP_COPY in vfio_mig_get_next_state()
678 * STOP -> RUNNING_P2P -> PRE_COPY_P2P in vfio_mig_get_next_state()
679 * STOP -> RUNNING_P2P -> RUNNING in vfio_mig_get_next_state()
680 * STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY in vfio_mig_get_next_state()
681 * STOP_COPY -> STOP -> RESUMING in vfio_mig_get_next_state()
682 * STOP_COPY -> STOP -> RUNNING_P2P in vfio_mig_get_next_state()
683 * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING in vfio_mig_get_next_state()
 685          * The following transitions are blocked: in vfio_mig_get_next_state()
 686          *         STOP_COPY -> PRE_COPY in vfio_mig_get_next_state()
 687          *         STOP_COPY -> PRE_COPY_P2P in vfio_mig_get_next_state()
788 (state_flags_table[cur_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
790 return -EINVAL; in vfio_mig_get_next_state()
793 (state_flags_table[new_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
795 return -EINVAL; in vfio_mig_get_next_state()
803 while ((state_flags_table[*next_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
807 return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL; in vfio_mig_get_next_state()
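
Every multi-step route enumerated in the comment above is composed from the single-step arcs, so the implementation only needs a next-hop table indexed by (current, target) plus a loop. A condensed sketch of that idea, restricted to the five mandatory/P2P states; the kernel's real table additionally covers PRE_COPY, PRE_COPY_P2P and ERROR, and each hop is validated against device->migration_flags as the surrounding lines show:

enum { STOP, RUNNING, STOP_COPY, RESUMING, RUNNING_P2P, NUM_STATES };

/* next_hop[cur][target]: the single arc to take next (abridged) */
static const unsigned char next_hop[NUM_STATES][NUM_STATES] = {
	[STOP]        = { STOP, RUNNING_P2P, STOP_COPY, RESUMING, RUNNING_P2P },
	[RUNNING]     = { RUNNING_P2P, RUNNING, RUNNING_P2P, RUNNING_P2P, RUNNING_P2P },
	[STOP_COPY]   = { STOP, STOP, STOP_COPY, STOP, STOP },
	[RESUMING]    = { STOP, STOP, STOP, RESUMING, STOP },
	[RUNNING_P2P] = { STOP, RUNNING, STOP, STOP, RUNNING_P2P },
};

/*
 * Callers loop: fetch the next hop, drive the device through that one
 * arc, repeat until cur == target. E.g. RESUMING -> RUNNING walks
 * RESUMING -> STOP -> RUNNING_P2P -> RUNNING, matching the list above.
 */
static int fsm_next(int cur, int target)
{
	return next_hop[cur][target];
}
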
826 mig->data_fd = fd; in vfio_ioct_mig_return_fd()
828 ret = -EFAULT; in vfio_ioct_mig_return_fd()
852 if (!device->mig_ops) in vfio_ioctl_device_feature_mig_device_state()
853 return -ENOTTY; in vfio_ioctl_device_feature_mig_device_state()
863 return -EFAULT; in vfio_ioctl_device_feature_mig_device_state()
868 ret = device->mig_ops->migration_get_state(device, in vfio_ioctl_device_feature_mig_device_state()
877 filp = device->mig_ops->migration_set_state(device, mig.device_state); in vfio_ioctl_device_feature_mig_device_state()
883 mig.data_fd = -1; in vfio_ioctl_device_feature_mig_device_state()
885 return -EFAULT; in vfio_ioctl_device_feature_mig_device_state()
900 if (!device->mig_ops) in vfio_ioctl_device_feature_migration_data_size()
901 return -ENOTTY; in vfio_ioctl_device_feature_migration_data_size()
908 ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length); in vfio_ioctl_device_feature_migration_data_size()
914 return -EFAULT; in vfio_ioctl_device_feature_migration_data_size()
924 .flags = device->migration_flags, in vfio_ioctl_device_feature_migration()
928 if (!device->mig_ops) in vfio_ioctl_device_feature_migration()
929 return -ENOTTY; in vfio_ioctl_device_feature_migration()
936 return -EFAULT; in vfio_ioctl_device_feature_migration()
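
Taken together, these feature handlers back a simple userspace sequence: query VFIO_DEVICE_FEATURE_MIGRATION for the supported flags, then set VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE to start a stop-copy. A hedged sketch against the <linux/vfio.h> UAPI, with error handling trimmed:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_and_stop_copy(int device_fd)
{
	/* Both payloads are 8 bytes, so one buffer serves GET and SET */
	char buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_mig_state)]
		__attribute__((aligned(8))) = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_migration *mig = (void *)feature->data;
	struct vfio_device_feature_mig_state *state = (void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;
	if (!(mig->flags & VFIO_MIGRATION_STOP_COPY))
		return -1;			/* migration not supported */

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	state->device_state = VFIO_DEVICE_STATE_STOP_COPY;
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;

	/* The kernel hands back a data_fd streaming the device's saved state */
	return state->data_fd;
}
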
958 last = curr->last; in vfio_combine_iova_ranges()
964 comb_start->last = last; in vfio_combine_iova_ranges()
975 curr_gap = curr->start - prev->last; in vfio_combine_iova_ranges()
990 comb_start->last = comb_end->last; in vfio_combine_iova_ranges()
992 cur_nodes--; in vfio_combine_iova_ranges()
1018 if (!device->log_ops) in vfio_ioctl_device_feature_logging_start()
1019 return -ENOTTY; in vfio_ioctl_device_feature_logging_start()
1028 return -EFAULT; in vfio_ioctl_device_feature_logging_start()
1032 return -EINVAL; in vfio_ioctl_device_feature_logging_start()
1035 return -E2BIG; in vfio_ioctl_device_feature_logging_start()
1041 return -ENOMEM; in vfio_ioctl_device_feature_logging_start()
1045 ret = -EFAULT; in vfio_ioctl_device_feature_logging_start()
1050 ret = -EINVAL; in vfio_ioctl_device_feature_logging_start()
1056 ret = -EOVERFLOW; in vfio_ioctl_device_feature_logging_start()
1061 nodes[i].last = range.iova + range.length - 1; in vfio_ioctl_device_feature_logging_start()
1065 ret = -EINVAL; in vfio_ioctl_device_feature_logging_start()
1071 ret = device->log_ops->log_start(device, &root, nnodes, in vfio_ioctl_device_feature_logging_start()
1077 ret = -EFAULT; in vfio_ioctl_device_feature_logging_start()
1078 device->log_ops->log_stop(device); in vfio_ioctl_device_feature_logging_start()
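
The logging_start handler above copies an array of IOVA ranges from userspace and hands them to log_ops->log_start(). A userspace sketch that starts dirty-page logging over one range, using the <linux/vfio.h> UAPI structs; the page size and range values are illustrative and error handling is trimmed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int start_dirty_logging(int device_fd, uint64_t iova, uint64_t length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	char buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_dma_logging_control)]
		__attribute__((aligned(8))) = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_dma_logging_control *ctrl =
		(void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	ctrl->page_size = 4096;		/* granularity of the dirty bitmap */
	ctrl->num_ranges = 1;
	ctrl->ranges = (uintptr_t)&range;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
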
1093 if (!device->log_ops) in vfio_ioctl_device_feature_logging_stop()
1094 return -ENOTTY; in vfio_ioctl_device_feature_logging_stop()
1101 return device->log_ops->log_stop(device); in vfio_ioctl_device_feature_logging_stop()
1110 return device->log_ops->log_read_and_clear(device, iova, length, iter); in vfio_device_log_read_and_clear()
1126 if (!device->log_ops) in vfio_ioctl_device_feature_logging_report()
1127 return -ENOTTY; in vfio_ioctl_device_feature_logging_report()
1136 return -EFAULT; in vfio_ioctl_device_feature_logging_report()
1139 return -EINVAL; in vfio_ioctl_device_feature_logging_report()
1143 return -EOVERFLOW; in vfio_ioctl_device_feature_logging_report()
1165 return -EFAULT; in vfio_ioctl_device_feature()
1168 return -EINVAL; in vfio_ioctl_device_feature()
1174 return -EINVAL; in vfio_ioctl_device_feature()
1180 return -EINVAL; in vfio_ioctl_device_feature()
1185 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1186 feature.argsz - minsz); in vfio_ioctl_device_feature()
1189 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1190 feature.argsz - minsz); in vfio_ioctl_device_feature()
1193 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1194 feature.argsz - minsz); in vfio_ioctl_device_feature()
1197 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1198 feature.argsz - minsz); in vfio_ioctl_device_feature()
1201 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1202 feature.argsz - minsz); in vfio_ioctl_device_feature()
1205 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1206 feature.argsz - minsz); in vfio_ioctl_device_feature()
1208 if (unlikely(!device->ops->device_feature)) in vfio_ioctl_device_feature()
1209 return -EINVAL; in vfio_ioctl_device_feature()
1210 return device->ops->device_feature(device, feature.flags, in vfio_ioctl_device_feature()
1211 arg->data, in vfio_ioctl_device_feature()
1212 feature.argsz - minsz); in vfio_ioctl_device_feature()
1219 struct vfio_device_file *df = filep->private_data; in vfio_device_fops_unl_ioctl()
1220 struct vfio_device *device = df->device; in vfio_device_fops_unl_ioctl()
1228 if (!smp_load_acquire(&df->access_granted)) in vfio_device_fops_unl_ioctl()
1229 return -EINVAL; in vfio_device_fops_unl_ioctl()
1236 if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) { in vfio_device_fops_unl_ioctl()
1254 if (unlikely(!device->ops->ioctl)) in vfio_device_fops_unl_ioctl()
1255 ret = -EINVAL; in vfio_device_fops_unl_ioctl()
1257 ret = device->ops->ioctl(device, cmd, arg); in vfio_device_fops_unl_ioctl()
1268 struct vfio_device_file *df = filep->private_data; in vfio_device_fops_read()
1269 struct vfio_device *device = df->device; in vfio_device_fops_read()
1272 if (!smp_load_acquire(&df->access_granted)) in vfio_device_fops_read()
1273 return -EINVAL; in vfio_device_fops_read()
1275 if (unlikely(!device->ops->read)) in vfio_device_fops_read()
1276 return -EINVAL; in vfio_device_fops_read()
1278 return device->ops->read(device, buf, count, ppos); in vfio_device_fops_read()
1285 struct vfio_device_file *df = filep->private_data; in vfio_device_fops_write()
1286 struct vfio_device *device = df->device; in vfio_device_fops_write()
1289 if (!smp_load_acquire(&df->access_granted)) in vfio_device_fops_write()
1290 return -EINVAL; in vfio_device_fops_write()
1292 if (unlikely(!device->ops->write)) in vfio_device_fops_write()
1293 return -EINVAL; in vfio_device_fops_write()
1295 return device->ops->write(device, buf, count, ppos); in vfio_device_fops_write()
1300 struct vfio_device_file *df = filep->private_data; in vfio_device_fops_mmap()
1301 struct vfio_device *device = df->device; in vfio_device_fops_mmap()
1304 if (!smp_load_acquire(&df->access_granted)) in vfio_device_fops_mmap()
1305 return -EINVAL; in vfio_device_fops_mmap()
1307 if (unlikely(!device->ops->mmap)) in vfio_device_fops_mmap()
1308 return -EINVAL; in vfio_device_fops_mmap()
1310 return device->ops->mmap(device, vma); in vfio_device_fops_mmap()
1326 struct vfio_device_file *df = file->private_data; in vfio_device_from_file()
1328 if (file->f_op != &vfio_device_fops) in vfio_device_from_file()
1330 return df->device; in vfio_device_from_file()
1334 * vfio_file_is_valid - True if the file is valid vfio file
1345 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
1349 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
1364 return device_iommu_capable(device->dev, in vfio_file_enforced_coherent()
1373 struct vfio_device_file *df = file->private_data; in vfio_device_file_set_kvm()
1380 spin_lock(&df->kvm_ref_lock); in vfio_device_file_set_kvm()
1381 df->kvm = kvm; in vfio_device_file_set_kvm()
1382 spin_unlock(&df->kvm_ref_lock); in vfio_device_file_set_kvm()
1386 * vfio_file_set_kvm - Link a kvm with VFIO drivers
1390  * When a VFIO device is first opened the KVM will be available in
1391  * device->kvm if one was associated with the file.
1407 * Sub-module support
1410  * Helper for managing a buffer of info chain capabilities, allocate or
1411  * reallocate a buffer with additional @size, filling in @id and @version
1412  * of the capability. A pointer to the new capability is returned.
1419 size_t size, u16 id, u16 version) in vfio_info_cap_add() argument
1427 buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL); in vfio_info_cap_add()
1429 kfree(caps->buf); in vfio_info_cap_add()
1430 caps->buf = NULL; in vfio_info_cap_add()
1431 caps->size = 0; in vfio_info_cap_add()
1432 return ERR_PTR(-ENOMEM); in vfio_info_cap_add()
1435 caps->buf = buf; in vfio_info_cap_add()
1436 header = buf + caps->size; in vfio_info_cap_add()
1441 header->id = id; in vfio_info_cap_add()
1442 header->version = version; in vfio_info_cap_add()
1445 for (tmp = buf; tmp->next; tmp = buf + tmp->next) in vfio_info_cap_add()
1448 tmp->next = caps->size; in vfio_info_cap_add()
1449 caps->size += size; in vfio_info_cap_add()
1458 void *buf = (void *)caps->buf; in vfio_info_cap_shift()
1463 for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset) in vfio_info_cap_shift()
1464 tmp->next += offset; in vfio_info_cap_shift()
1473 header = vfio_info_cap_add(caps, size, cap->id, cap->version); in vfio_info_add_capability()
1477 memcpy(header + 1, cap + 1, size - sizeof(*header)); in vfio_info_add_capability()
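
A driver-side sketch of consuming these helpers: build a sparse-mmap region capability and append it to the chain with vfio_info_add_capability(), which funnels into vfio_info_cap_add() above; after copying the fixed-size info struct to userspace, vfio_info_cap_shift() fixes up the chain offsets. The region values are illustrative:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/vfio.h>

static int my_add_sparse_cap(struct vfio_info_cap *caps)
{
	struct vfio_region_info_cap_sparse_mmap *sparse;
	size_t size = struct_size(sparse, areas, 1);
	int ret;

	sparse = kzalloc(size, GFP_KERNEL);
	if (!sparse)
		return -ENOMEM;

	sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
	sparse->header.version = 1;
	sparse->nr_areas = 1;
	sparse->areas[0].offset = 0;
	sparse->areas[0].size = PAGE_SIZE;	/* illustrative */

	/* Copies the cap into caps->buf and links it into the chain */
	ret = vfio_info_add_capability(caps, &sparse->header, size);
	kfree(sparse);
	return ret;
}
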
1491 if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) || in vfio_set_irqs_validate_and_prepare()
1492 (hdr->count >= (U32_MAX - hdr->start)) || in vfio_set_irqs_validate_and_prepare()
1493 (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | in vfio_set_irqs_validate_and_prepare()
1495 return -EINVAL; in vfio_set_irqs_validate_and_prepare()
1500 if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs) in vfio_set_irqs_validate_and_prepare()
1501 return -EINVAL; in vfio_set_irqs_validate_and_prepare()
1503 switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { in vfio_set_irqs_validate_and_prepare()
1514 return -EINVAL; in vfio_set_irqs_validate_and_prepare()
1518 if (hdr->argsz - minsz < hdr->count * size) in vfio_set_irqs_validate_and_prepare()
1519 return -EINVAL; in vfio_set_irqs_validate_and_prepare()
1522 return -EINVAL; in vfio_set_irqs_validate_and_prepare()
1524 *data_size = hdr->count * size; in vfio_set_irqs_validate_and_prepare()
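
A sketch of how a driver's VFIO_DEVICE_SET_IRQS handler typically uses this validator before touching the variable-length payload; MY_NUM_IRQS and MY_NUM_IRQ_TYPES are placeholders (vfio-pci passes its own limits, e.g. VFIO_PCI_NUM_IRQS):

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#define MY_NUM_IRQS		1	/* placeholder */
#define MY_NUM_IRQ_TYPES	1	/* placeholder */

static int my_set_irqs_ioctl(struct vfio_device *vdev, unsigned long arg)
{
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	/* Validates argsz/index/start/count/flags and computes payload size */
	ret = vfio_set_irqs_validate_and_prepare(&hdr, MY_NUM_IRQS,
						 MY_NUM_IRQ_TYPES, &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user((void __user *)(arg + sizeof(hdr)),
				   data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	/* ... program eventfds / mask state according to hdr.flags ... */

	kfree(data);
	return 0;
}
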
1548 /* group->container cannot change while a vfio device is open */ in vfio_pin_pages()
1550 return -EINVAL; in vfio_pin_pages()
1551 if (!device->ops->dma_unmap) in vfio_pin_pages()
1552 return -EINVAL; in vfio_pin_pages()
1556 if (device->iommufd_access) { in vfio_pin_pages()
1560 return -EINVAL; in vfio_pin_pages()
1568 device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE), in vfio_pin_pages()
1575 return -EINVAL; in vfio_pin_pages()
1590 if (WARN_ON(!device->ops->dma_unmap)) in vfio_unpin_pages()
1597 if (device->iommufd_access) { in vfio_unpin_pages()
1600 iommufd_access_unpin_pages(device->iommufd_access, in vfio_unpin_pages()
1629 return -EINVAL; in vfio_dma_rw()
1635 if (device->iommufd_access) { in vfio_dma_rw()
1639 return -EINVAL; in vfio_dma_rw()
1641 /* VFIO historically tries to auto-detect a kthread */ in vfio_dma_rw()
1642 if (!current->mm) in vfio_dma_rw()
1646 return iommufd_access_rw(device->iommufd_access, iova, data, in vfio_dma_rw()
1649 return -EINVAL; in vfio_dma_rw()
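
These three exports form the mediated-driver DMA interface: pin guest pages by IOVA, access them, unpin. A minimal sketch; vfio_pin_pages(), vfio_dma_rw() and vfio_unpin_pages() are the real exported APIs, while the surrounding driver is hypothetical:

#include <linux/iommu.h>
#include <linux/vfio.h>

static int my_copy_to_guest(struct vfio_device *vdev, dma_addr_t iova,
			    void *src, size_t len)
{
	struct page *page;
	int ret;

	/* Pin the single page backing this IOVA (iova must be page aligned) */
	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* Alternatively, vfio_dma_rw() copies without an explicit pin */
	ret = vfio_dma_rw(vdev, iova, src, len, true /* write */);

	vfio_unpin_pages(vdev, iova, 1);
	return ret;
}
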
1670 /* /sys/class/vfio-dev/vfioX */ in vfio_init()
1671 vfio.device_class = class_create("vfio-dev"); in vfio_init()