Lines Matching full:device

15 #include <linux/device.h>
62 …U mode. This mode provides no device isolation, no DMA translation, no host kernel protection, ca…
67 int vfio_assign_device_set(struct vfio_device *device, void *set_id) in vfio_assign_device_set() argument
110 device->dev_set = dev_set; in vfio_assign_device_set()
111 list_add_tail(&device->dev_set_list, &dev_set->device_list); in vfio_assign_device_set()
117 static void vfio_release_device_set(struct vfio_device *device) in vfio_release_device_set() argument
119 struct vfio_device_set *dev_set = device->dev_set; in vfio_release_device_set()
125 list_del(&device->dev_set_list); in vfio_release_device_set()
153 struct device *dev) in vfio_find_device_in_devset()
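The vfio_assign_device_set() matches above implement device sets: devices that must be opened, closed, and reset as a unit share one struct vfio_device_set keyed by an opaque set_id pointer. Below is a minimal sketch of how a driver's init path might pick a set_id, loosely following the vfio-pci pattern; the function name and fallback policy here are illustrative, not taken from this file.

#include <linux/pci.h>
#include <linux/vfio.h>

/* Hypothetical ->init helper: share a dev_set across a hot-resettable slot. */
static int my_vfio_pci_init_devset(struct vfio_device *core_vdev)
{
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);

	/*
	 * Any stable pointer works as set_id; functions behind the same
	 * resettable slot land in the same vfio_device_set, so VFIO can
	 * serialize open/close/reset across all of them.
	 */
	if (pdev->slot && !pci_probe_reset_slot(pdev->slot))
		return vfio_assign_device_set(core_vdev, pdev->slot);

	/* Otherwise fall back to a singleton set keyed by the device itself. */
	return vfio_assign_device_set(core_vdev, core_vdev);
}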
167 * Device objects - create, release, get, put, search
169 /* Device reference always implies a group reference */
170 void vfio_device_put_registration(struct vfio_device *device) in vfio_device_put_registration() argument
172 if (refcount_dec_and_test(&device->refcount)) in vfio_device_put_registration()
173 complete(&device->comp); in vfio_device_put_registration()
177 bool vfio_device_try_get_registration(struct vfio_device *device) in vfio_device_try_get_registration() argument
179 return refcount_inc_not_zero(&device->refcount); in vfio_device_try_get_registration()
187 static void vfio_device_release(struct device *dev) in vfio_device_release()
189 struct vfio_device *device = in vfio_device_release() local
190 container_of(dev, struct vfio_device, device); in vfio_device_release()
192 vfio_release_device_set(device); in vfio_device_release()
193 ida_free(&vfio.device_ida, device->index); in vfio_device_release()
195 if (device->ops->release) in vfio_device_release()
196 device->ops->release(device); in vfio_device_release()
198 iput(device->inode); in vfio_device_release()
200 kvfree(device); in vfio_device_release()
203 static int vfio_init_device(struct vfio_device *device, struct device *dev,
214 * Driver may provide an @init callback to cover device private data.
218 struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, in _vfio_alloc_device()
221 struct vfio_device *device; in _vfio_alloc_device() local
227 device = kvzalloc(size, GFP_KERNEL); in _vfio_alloc_device()
228 if (!device) in _vfio_alloc_device()
231 ret = vfio_init_device(device, dev, ops); in _vfio_alloc_device()
234 return device; in _vfio_alloc_device()
237 kvfree(device); in _vfio_alloc_device()
273 static int vfio_init_device(struct vfio_device *device, struct device *dev, in vfio_init_device() argument
284 device->index = ret; in vfio_init_device()
285 init_completion(&device->comp); in vfio_init_device()
286 device->dev = dev; in vfio_init_device()
287 device->ops = ops; in vfio_init_device()
288 device->inode = vfio_fs_inode_new(); in vfio_init_device()
289 if (IS_ERR(device->inode)) { in vfio_init_device()
290 ret = PTR_ERR(device->inode); in vfio_init_device()
295 ret = ops->init(device); in vfio_init_device()
300 device_initialize(&device->device); in vfio_init_device()
301 device->device.release = vfio_device_release; in vfio_init_device()
302 device->device.class = vfio.device_class; in vfio_init_device()
303 device->device.parent = device->dev; in vfio_init_device()
307 iput(device->inode); in vfio_init_device()
310 vfio_release_device_set(device); in vfio_init_device()
311 ida_free(&vfio.device_ida, device->index); in vfio_init_device()
315 static int __vfio_register_dev(struct vfio_device *device, in __vfio_register_dev() argument
321 (!device->ops->bind_iommufd || in __vfio_register_dev()
322 !device->ops->unbind_iommufd || in __vfio_register_dev()
323 !device->ops->attach_ioas || in __vfio_register_dev()
324 !device->ops->detach_ioas))) in __vfio_register_dev()
328 * If the driver doesn't specify a set then the device is added to a in __vfio_register_dev()
331 if (!device->dev_set) in __vfio_register_dev()
332 vfio_assign_device_set(device, device); in __vfio_register_dev()
334 ret = dev_set_name(&device->device, "vfio%d", device->index); in __vfio_register_dev()
338 ret = vfio_device_set_group(device, type); in __vfio_register_dev()
347 if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) && in __vfio_register_dev()
348 !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) { in __vfio_register_dev()
353 ret = vfio_device_add(device); in __vfio_register_dev()
358 refcount_set(&device->refcount, 1); in __vfio_register_dev()
360 vfio_device_group_register(device); in __vfio_register_dev()
361 vfio_device_debugfs_init(device); in __vfio_register_dev()
365 vfio_device_remove_group(device); in __vfio_register_dev()
369 int vfio_register_group_dev(struct vfio_device *device) in vfio_register_group_dev() argument
371 return __vfio_register_dev(device, VFIO_IOMMU); in vfio_register_group_dev()
376 * Register a virtual device without IOMMU backing. The user of this
377 * device must not be able to directly trigger unmediated DMA.
379 int vfio_register_emulated_iommu_dev(struct vfio_device *device) in vfio_register_emulated_iommu_dev() argument
381 return __vfio_register_dev(device, VFIO_EMULATED_IOMMU); in vfio_register_emulated_iommu_dev()
386 * Decrement the device reference count and wait for the device to be
387 * removed. Open file descriptors for the device... */
388 void vfio_unregister_group_dev(struct vfio_device *device) in vfio_unregister_group_dev() argument
395 * Prevent new device opened by userspace via the in vfio_unregister_group_dev()
398 vfio_device_group_unregister(device); in vfio_unregister_group_dev()
402 * new device opened by userspace in the cdev path. in vfio_unregister_group_dev()
404 vfio_device_del(device); in vfio_unregister_group_dev()
406 vfio_device_put_registration(device); in vfio_unregister_group_dev()
407 rc = try_wait_for_completion(&device->comp); in vfio_unregister_group_dev()
409 if (device->ops->request) in vfio_unregister_group_dev()
410 device->ops->request(device, i++); in vfio_unregister_group_dev()
413 rc = wait_for_completion_timeout(&device->comp, in vfio_unregister_group_dev()
417 &device->comp, HZ * 10); in vfio_unregister_group_dev()
420 dev_warn(device->dev, in vfio_unregister_group_dev()
421 "Device is currently in use, task" in vfio_unregister_group_dev()
423 "blocked until device is released", in vfio_unregister_group_dev()
429 vfio_device_debugfs_exit(device); in vfio_unregister_group_dev()
431 vfio_device_remove_group(device); in vfio_unregister_group_dev()
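Taken together, the matches from _vfio_alloc_device() through vfio_unregister_group_dev() trace the registration lifecycle. A hedged sketch of how a driver typically drives it through the vfio_alloc_device() wrapper follows; struct my_vfio_device, my_probe() and my_remove() are hypothetical names.

#include <linux/device.h>
#include <linux/vfio.h>

/* Hypothetical driver state embedding struct vfio_device as a member. */
struct my_vfio_device {
	struct vfio_device vdev;
	void __iomem *regs;		/* example private data */
};

static const struct vfio_device_ops my_vfio_ops = {
	.name = "my-vfio",
	/* a real driver also fills .init/.release/.open_device/.close_device/... */
};

static int my_probe(struct device *dev)
{
	struct my_vfio_device *my;
	int ret;

	/* Allocates the device, assigns an index, and runs ops->init if set. */
	my = vfio_alloc_device(my_vfio_device, vdev, dev, &my_vfio_ops);
	if (IS_ERR(my))
		return PTR_ERR(my);

	/* Publishes the device (group/cdev paths) and sets refcount to 1. */
	ret = vfio_register_group_dev(&my->vdev);
	if (ret) {
		vfio_put_device(&my->vdev);
		return ret;
	}
	dev_set_drvdata(dev, my);
	return 0;
}

static void my_remove(struct device *dev)
{
	struct my_vfio_device *my = dev_get_drvdata(dev);

	/* Drops the registration ref and waits for open fds, as shown above. */
	vfio_unregister_group_dev(&my->vdev);
	vfio_put_device(&my->vdev);
}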
436 void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm) in vfio_device_get_kvm_safe() argument
442 lockdep_assert_held(&device->dev_set->lock); in vfio_device_get_kvm_safe()
464 device->put_kvm = pfn; in vfio_device_get_kvm_safe()
465 device->kvm = kvm; in vfio_device_get_kvm_safe()
468 void vfio_device_put_kvm(struct vfio_device *device) in vfio_device_put_kvm() argument
470 lockdep_assert_held(&device->dev_set->lock); in vfio_device_put_kvm()
472 if (!device->kvm) in vfio_device_put_kvm()
475 if (WARN_ON(!device->put_kvm)) in vfio_device_put_kvm()
478 device->put_kvm(device->kvm); in vfio_device_put_kvm()
479 device->put_kvm = NULL; in vfio_device_put_kvm()
483 device->kvm = NULL; in vfio_device_put_kvm()
488 static bool vfio_assert_device_open(struct vfio_device *device) in vfio_assert_device_open() argument
490 return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); in vfio_assert_device_open()
494 vfio_allocate_device_file(struct vfio_device *device) in vfio_allocate_device_file() argument
502 df->device = device; in vfio_allocate_device_file()
510 struct vfio_device *device = df->device; in vfio_df_device_first_open() local
514 lockdep_assert_held(&device->dev_set->lock); in vfio_df_device_first_open()
516 if (!try_module_get(device->dev->driver->owner)) in vfio_df_device_first_open()
522 ret = vfio_device_group_use_iommu(device); in vfio_df_device_first_open()
526 if (device->ops->open_device) { in vfio_df_device_first_open()
527 ret = device->ops->open_device(device); in vfio_df_device_first_open()
537 vfio_device_group_unuse_iommu(device); in vfio_df_device_first_open()
539 module_put(device->dev->driver->owner); in vfio_df_device_first_open()
545 struct vfio_device *device = df->device; in vfio_df_device_last_close() local
548 lockdep_assert_held(&device->dev_set->lock); in vfio_df_device_last_close()
550 if (device->ops->close_device) in vfio_df_device_last_close()
551 device->ops->close_device(device); in vfio_df_device_last_close()
555 vfio_device_group_unuse_iommu(device); in vfio_df_device_last_close()
556 module_put(device->dev->driver->owner); in vfio_df_device_last_close()
561 struct vfio_device *device = df->device; in vfio_df_open() local
564 lockdep_assert_held(&device->dev_set->lock); in vfio_df_open()
567 * Only the group path allows the device to be opened multiple in vfio_df_open()
568 * times. The device cdev path doesn't have a secure way for it. in vfio_df_open()
570 if (device->open_count != 0 && !df->group) in vfio_df_open()
573 device->open_count++; in vfio_df_open()
574 if (device->open_count == 1) { in vfio_df_open()
577 device->open_count--; in vfio_df_open()
585 struct vfio_device *device = df->device; in vfio_df_close() local
587 lockdep_assert_held(&device->dev_set->lock); in vfio_df_close()
589 if (!vfio_assert_device_open(device)) in vfio_df_close()
591 if (device->open_count == 1) in vfio_df_close()
593 device->open_count--; in vfio_df_close()
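The open_count handling above means a driver's open_device()/close_device() callbacks run only on the first open and the last close, under dev_set->lock. Illustrative callbacks, continuing the hypothetical driver sketched earlier:

static int my_open_device(struct vfio_device *core_vdev)
{
	/*
	 * First-open setup only: in the group path the same device may be
	 * opened through several fds, but this runs once per 0 -> 1 transition
	 * of open_count.
	 */
	return 0;
}

static void my_close_device(struct vfio_device *core_vdev)
{
	/* Last-close teardown: runs when open_count drops back to zero. */
}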
600 static inline int vfio_device_pm_runtime_get(struct vfio_device *device) in vfio_device_pm_runtime_get() argument
602 struct device *dev = device->dev; in vfio_device_pm_runtime_get()
621 static inline void vfio_device_pm_runtime_put(struct vfio_device *device) in vfio_device_pm_runtime_put() argument
623 struct device *dev = device->dev; in vfio_device_pm_runtime_put()
630 * VFIO Device fd
635 struct vfio_device *device = df->device; in vfio_device_fops_release() local
642 vfio_device_put_registration(device); in vfio_device_fops_release()
651 * @cur_fsm - The current state the device is in
664 int vfio_mig_get_next_state(struct vfio_device *device, in vfio_mig_get_next_state() argument
836 (state_flags_table[cur_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
841 (state_flags_table[new_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
851 while ((state_flags_table[*next_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
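vfio_mig_get_next_state() computes one legal intermediate step at a time, filtered by device->migration_flags. A sketch of how a driver's migration_set_state() op might loop over it is below; struct my_mig_device, my_mig_step() and the mig_state field are hypothetical driver-private pieces.

struct my_mig_device {
	struct vfio_device vdev;
	enum vfio_device_mig_state mig_state;	/* driver-tracked current state */
};

/* Hypothetical per-arc work; returns a data fd only for save/resume arcs. */
static struct file *my_mig_step(struct my_mig_device *my,
				enum vfio_device_mig_state next);

static struct file *
my_migration_set_state(struct vfio_device *vdev,
		       enum vfio_device_mig_state new_state)
{
	struct my_mig_device *my = container_of(vdev, struct my_mig_device, vdev);
	enum vfio_device_mig_state next;
	struct file *filp = NULL;
	int ret;

	while (my->mig_state != new_state) {
		ret = vfio_mig_get_next_state(vdev, my->mig_state, new_state, &next);
		if (ret)
			return ERR_PTR(ret);

		filp = my_mig_step(my, next);
		if (IS_ERR(filp))
			return filp;

		my->mig_state = next;
	}
	return filp;	/* NULL unless the final arc produced a data fd */
}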
890 vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device, in vfio_ioctl_device_feature_mig_device_state() argument
900 if (!device->mig_ops) in vfio_ioctl_device_feature_mig_device_state()
916 ret = device->mig_ops->migration_get_state(device, in vfio_ioctl_device_feature_mig_device_state()
925 filp = device->mig_ops->migration_set_state(device, mig.device_state); in vfio_ioctl_device_feature_mig_device_state()
940 vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device, in vfio_ioctl_device_feature_migration_data_size() argument
948 if (!device->mig_ops) in vfio_ioctl_device_feature_migration_data_size()
956 ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length); in vfio_ioctl_device_feature_migration_data_size()
967 static int vfio_ioctl_device_feature_migration(struct vfio_device *device, in vfio_ioctl_device_feature_migration() argument
972 .flags = device->migration_flags, in vfio_ioctl_device_feature_migration()
976 if (!device->mig_ops) in vfio_ioctl_device_feature_migration()
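On the uAPI side, vfio_ioctl_device_feature_migration() answers a VFIO_DEVICE_FEATURE_MIGRATION probe from userspace. A minimal userspace sketch, assuming device_fd is an already-open VFIO device fd; error handling is trimmed.

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_migration_flags(int device_fd, __u64 *mig_flags)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_migration)) / sizeof(__u64)] = {};
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_migration *mig = (void *)feat->data;

	feat->argsz = sizeof(*feat) + sizeof(*mig);
	feat->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat))
		return -1;	/* unsupported or failed; errno is set */

	*mig_flags = mig->flags;	/* e.g. VFIO_MIGRATION_STOP_COPY */
	return 0;
}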
1050 vfio_ioctl_device_feature_logging_start(struct vfio_device *device, in vfio_ioctl_device_feature_logging_start() argument
1066 if (!device->log_ops) in vfio_ioctl_device_feature_logging_start()
1118 ret = device->log_ops->log_start(device, &root, nnodes, in vfio_ioctl_device_feature_logging_start()
1125 device->log_ops->log_stop(device); in vfio_ioctl_device_feature_logging_start()
1134 vfio_ioctl_device_feature_logging_stop(struct vfio_device *device, in vfio_ioctl_device_feature_logging_stop() argument
1140 if (!device->log_ops) in vfio_ioctl_device_feature_logging_stop()
1148 return device->log_ops->log_stop(device); in vfio_ioctl_device_feature_logging_stop()
1155 struct vfio_device *device = opaque; in vfio_device_log_read_and_clear() local
1157 return device->log_ops->log_read_and_clear(device, iova, length, iter); in vfio_device_log_read_and_clear()
1161 vfio_ioctl_device_feature_logging_report(struct vfio_device *device, in vfio_ioctl_device_feature_logging_report() argument
1173 if (!device->log_ops) in vfio_ioctl_device_feature_logging_report()
1198 ret = iova_bitmap_for_each(iter, device, in vfio_ioctl_device_feature_logging_report()
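The log_start/log_stop/log_read_and_clear matches back the dirty-page tracking features. A hedged userspace sketch of starting tracking over a single IOVA range with VFIO_DEVICE_FEATURE_DMA_LOGGING_START follows; the range and page size values are placeholders.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int start_dirty_tracking(int device_fd, __u64 iova, __u64 length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	struct vfio_device_feature_dma_logging_control ctrl = {
		.page_size = 4096,
		.num_ranges = 1,
		.ranges = (uintptr_t)&range,
	};
	__u64 buf[(sizeof(struct vfio_device_feature) + sizeof(ctrl)) / sizeof(__u64)] = {};
	struct vfio_device_feature *feat = (void *)buf;

	feat->argsz = sizeof(*feat) + sizeof(ctrl);
	feat->flags = VFIO_DEVICE_FEATURE_SET |
		      VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	memcpy(feat->data, &ctrl, sizeof(ctrl));

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feat);
}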
1205 static int vfio_ioctl_device_feature(struct vfio_device *device, in vfio_ioctl_device_feature() argument
1232 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1236 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1240 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1244 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1248 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1252 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1255 if (unlikely(!device->ops->device_feature)) in vfio_ioctl_device_feature()
1257 return device->ops->device_feature(device, feature.flags, in vfio_ioctl_device_feature()
1263 static long vfio_get_region_info(struct vfio_device *device, in vfio_get_region_info() argument
1271 if (unlikely(!device->ops->get_region_info_caps)) in vfio_get_region_info()
1279 ret = device->ops->get_region_info_caps(device, &info, &caps); in vfio_get_region_info()
1312 struct vfio_device *device = df->device; in vfio_device_fops_unl_ioctl() local
1323 ret = vfio_device_pm_runtime_get(device); in vfio_device_fops_unl_ioctl()
1342 ret = vfio_ioctl_device_feature(device, uptr); in vfio_device_fops_unl_ioctl()
1346 ret = vfio_get_region_info(device, uptr); in vfio_device_fops_unl_ioctl()
1350 if (unlikely(!device->ops->ioctl)) in vfio_device_fops_unl_ioctl()
1353 ret = device->ops->ioctl(device, cmd, arg); in vfio_device_fops_unl_ioctl()
1357 vfio_device_pm_runtime_put(device); in vfio_device_fops_unl_ioctl()
1365 struct vfio_device *device = df->device; in vfio_device_fops_read() local
1371 if (unlikely(!device->ops->read)) in vfio_device_fops_read()
1374 return device->ops->read(device, buf, count, ppos); in vfio_device_fops_read()
1382 struct vfio_device *device = df->device; in vfio_device_fops_write() local
1388 if (unlikely(!device->ops->write)) in vfio_device_fops_write()
1391 return device->ops->write(device, buf, count, ppos); in vfio_device_fops_write()
1397 struct vfio_device *device = df->device; in vfio_device_fops_mmap() local
1403 if (unlikely(!device->ops->mmap)) in vfio_device_fops_mmap()
1406 return device->ops->mmap(device, vma); in vfio_device_fops_mmap()
1414 struct vfio_device *device = df->device; in vfio_device_show_fdinfo() local
1416 path = kobject_get_path(&device->dev->kobj, GFP_KERNEL); in vfio_device_show_fdinfo()
1420 seq_printf(m, "vfio-device-syspath: /sys%s\n", path); in vfio_device_show_fdinfo()
1445 return df->device; in vfio_device_from_file()
1450 * @file: VFIO group file or VFIO device file
1462 * @file: VFIO group file or VFIO device file
1470 struct vfio_device *device; in vfio_file_enforced_coherent() local
1477 device = vfio_device_from_file(file); in vfio_file_enforced_coherent()
1478 if (device) in vfio_file_enforced_coherent()
1479 return device_iommu_capable(device->dev, in vfio_file_enforced_coherent()
1493 * iommufd successfully in the vfio device cdev path. in vfio_device_file_set_kvm()
1502 * @file: VFIO group file or VFIO device file
1505 * When a VFIO device is first opened the KVM will be available in
1506 * device->kvm if one was associated with the file.
1649 * @device [in] : device
1660 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, in vfio_pin_pages() argument
1663 /* group->container cannot change while a vfio device is open */ in vfio_pin_pages()
1664 if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device))) in vfio_pin_pages()
1666 if (!device->ops->dma_unmap) in vfio_pin_pages()
1668 if (vfio_device_has_container(device)) in vfio_pin_pages()
1669 return vfio_device_container_pin_pages(device, iova, in vfio_pin_pages()
1671 if (device->iommufd_access) { in vfio_pin_pages()
1683 device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE), in vfio_pin_pages()
1696 * @device [in] : device
1701 void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) in vfio_unpin_pages() argument
1703 if (WARN_ON(!vfio_assert_device_open(device))) in vfio_unpin_pages()
1705 if (WARN_ON(!device->ops->dma_unmap)) in vfio_unpin_pages()
1708 if (vfio_device_has_container(device)) { in vfio_unpin_pages()
1709 vfio_device_container_unpin_pages(device, iova, npage); in vfio_unpin_pages()
1712 if (device->iommufd_access) { in vfio_unpin_pages()
1715 iommufd_access_unpin_pages(device->iommufd_access, in vfio_unpin_pages()
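vfio_pin_pages()/vfio_unpin_pages() are how mediated drivers pin guest memory through either the legacy container or an iommufd access, per the checks above. Below is a sketch of pinning a single page for CPU-side emulation; my_emulate_dma_write() and its one-page constraint are illustrative, and the device must provide a ->dma_unmap op for pinning to be allowed.

#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

static int my_emulate_dma_write(struct vfio_device *vdev, dma_addr_t iova,
				const void *src, size_t len)
{
	struct page *page;
	void *vaddr;
	int ret;

	/* Keep the access within one page for this simple example. */
	if (offset_in_page(iova) + len > PAGE_SIZE)
		return -EINVAL;

	/* Pin the page backing this IOVA (container or iommufd path). */
	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	vaddr = kmap_local_page(page);
	memcpy(vaddr + offset_in_page(iova), src, len);
	kunmap_local(vaddr);

	vfio_unpin_pages(vdev, iova, 1);
	return 0;
}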
1725 * behalf of the device.
1731 * not a real device DMA, it is not necessary to pin the user space memory.
1733 * @device [in] : VFIO device
1740 int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, in vfio_dma_rw() argument
1743 if (!data || len <= 0 || !vfio_assert_device_open(device)) in vfio_dma_rw()
1746 if (vfio_device_has_container(device)) in vfio_dma_rw()
1747 return vfio_device_container_dma_rw(device, iova, in vfio_dma_rw()
1750 if (device->iommufd_access) { in vfio_dma_rw()
1761 return iommufd_access_rw(device->iommufd_access, iova, data, in vfio_dma_rw()
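vfio_dma_rw() lets a driver read or write the IOVA space on the CPU's behalf without pinning, as the comment above notes. A short sketch of reading a descriptor; struct my_desc and my_parse_desc() are hypothetical.

struct my_desc {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

static int my_parse_desc(const struct my_desc *desc);	/* hypothetical */

static int my_read_descriptor(struct vfio_device *vdev, dma_addr_t desc_iova)
{
	struct my_desc desc;
	int ret;

	/* write=false: copy from the device's IOVA space into the kernel buffer. */
	ret = vfio_dma_rw(vdev, desc_iova, &desc, sizeof(desc), false);
	if (ret)
		return ret;

	return my_parse_desc(&desc);
}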