Lines matching full:device (VFIO core, drivers/vfio/vfio_main.c)
15 #include <linux/device.h>
55 MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
60 int vfio_assign_device_set(struct vfio_device *device, void *set_id) in vfio_assign_device_set() argument
103 device->dev_set = dev_set; in vfio_assign_device_set()
104 list_add_tail(&device->dev_set_list, &dev_set->device_list); in vfio_assign_device_set()
110 static void vfio_release_device_set(struct vfio_device *device) in vfio_release_device_set() argument
112 struct vfio_device_set *dev_set = device->dev_set; in vfio_release_device_set()
118 list_del(&device->dev_set_list); in vfio_release_device_set()
146 struct device *dev) in vfio_find_device_in_devset()
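The lines above implement VFIO device sets: vfio_assign_device_set() files a device into a vfio_device_set keyed by an opaque set_id pointer, so every device registering the same key shares one set (and one set lock). If a driver never calls it, registration falls back to a singleton set, as the __vfio_register_dev() lines further down show. A minimal sketch of a driver opting in from its ->init() callback, assuming (purely for illustration) that the parent struct device is a sensible grouping key:

    #include <linux/vfio.h>

    /*
     * Hypothetical ->init() callback: every vfio_device that passes the
     * same set_id pointer lands in the same vfio_device_set, so the core
     * serializes open/close (and reset) across the whole group.
     */
    static int my_vfio_init(struct vfio_device *vdev)
    {
            /* assumption: all functions of one card share a parent */
            return vfio_assign_device_set(vdev, vdev->dev->parent);
    }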
160 * Device objects - create, release, get, put, search
162 /* Device reference always implies a group reference */
163 void vfio_device_put_registration(struct vfio_device *device) in vfio_device_put_registration() argument
165 if (refcount_dec_and_test(&device->refcount)) in vfio_device_put_registration()
166 complete(&device->comp); in vfio_device_put_registration()
169 bool vfio_device_try_get_registration(struct vfio_device *device) in vfio_device_try_get_registration() argument
171 return refcount_inc_not_zero(&device->refcount); in vfio_device_try_get_registration()
178 static void vfio_device_release(struct device *dev) in vfio_device_release()
180 struct vfio_device *device = in vfio_device_release() local
181 container_of(dev, struct vfio_device, device); in vfio_device_release()
183 vfio_release_device_set(device); in vfio_device_release()
184 ida_free(&vfio.device_ida, device->index); in vfio_device_release()
186 if (device->ops->release) in vfio_device_release()
187 device->ops->release(device); in vfio_device_release()
189 kvfree(device); in vfio_device_release()
192 static int vfio_init_device(struct vfio_device *device, struct device *dev,
203 * Driver may provide an @init callback to cover device private data.
207 struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, in _vfio_alloc_device()
210 struct vfio_device *device; in _vfio_alloc_device() local
216 device = kvzalloc(size, GFP_KERNEL); in _vfio_alloc_device()
217 if (!device) in _vfio_alloc_device()
220 ret = vfio_init_device(device, dev, ops); in _vfio_alloc_device()
223 return device; in _vfio_alloc_device()
226 kvfree(device); in _vfio_alloc_device()
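_vfio_alloc_device() is normally reached through the vfio_alloc_device() macro, which embeds struct vfio_device as the first member of a driver-private structure; keeping it first means the returned pointer, including an ERR_PTR on failure, converts cleanly via container_of(). A hedged sketch with hypothetical my_* names:

    #include <linux/vfio.h>

    struct my_device {
            struct vfio_device vdev;        /* must be the first member */
            void __iomem *regs;             /* illustrative private state */
            enum vfio_device_mig_state mig_state;   /* used by the migration sketch below */
    };

    static const struct vfio_device_ops my_ops;     /* see the ops sketch below */

    static struct my_device *my_alloc(struct device *dev)
    {
            /* kvzalloc()s sizeof(struct my_device), then runs my_ops.init */
            return vfio_alloc_device(my_device, vdev, dev, &my_ops);
    }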
234 static int vfio_init_device(struct vfio_device *device, struct device *dev, in vfio_init_device() argument
245 device->index = ret; in vfio_init_device()
246 init_completion(&device->comp); in vfio_init_device()
247 device->dev = dev; in vfio_init_device()
248 device->ops = ops; in vfio_init_device()
251 ret = ops->init(device); in vfio_init_device()
256 device_initialize(&device->device); in vfio_init_device()
257 device->device.release = vfio_device_release; in vfio_init_device()
258 device->device.class = vfio.device_class; in vfio_init_device()
259 device->device.parent = device->dev; in vfio_init_device()
263 vfio_release_device_set(device); in vfio_init_device()
264 ida_free(&vfio.device_ida, device->index); in vfio_init_device()
268 static int __vfio_register_dev(struct vfio_device *device, in __vfio_register_dev() argument
274 (!device->ops->bind_iommufd || in __vfio_register_dev()
275 !device->ops->unbind_iommufd || in __vfio_register_dev()
276 !device->ops->attach_ioas || in __vfio_register_dev()
277 !device->ops->detach_ioas))) in __vfio_register_dev()
281 * If the driver doesn't specify a set then the device is added to a in __vfio_register_dev()
284 if (!device->dev_set) in __vfio_register_dev()
285 vfio_assign_device_set(device, device); in __vfio_register_dev()
287 ret = dev_set_name(&device->device, "vfio%d", device->index); in __vfio_register_dev()
291 ret = vfio_device_set_group(device, type); in __vfio_register_dev()
300 if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) && in __vfio_register_dev()
301 !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) { in __vfio_register_dev()
306 ret = vfio_device_add(device); in __vfio_register_dev()
311 refcount_set(&device->refcount, 1); in __vfio_register_dev()
313 vfio_device_group_register(device); in __vfio_register_dev()
314 vfio_device_debugfs_init(device); in __vfio_register_dev()
318 vfio_device_remove_group(device); in __vfio_register_dev()
322 int vfio_register_group_dev(struct vfio_device *device) in vfio_register_group_dev() argument
324 return __vfio_register_dev(device, VFIO_IOMMU); in vfio_register_group_dev()
329 * Register a virtual device without IOMMU backing. The user of this
330 * device must not be able to directly trigger unmediated DMA.
332 int vfio_register_emulated_iommu_dev(struct vfio_device *device) in vfio_register_emulated_iommu_dev() argument
334 return __vfio_register_dev(device, VFIO_EMULATED_IOMMU); in vfio_register_emulated_iommu_dev()
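When CONFIG_IOMMUFD is enabled, __vfio_register_dev() refuses (with a WARN) to register ops that lack the four iommufd callbacks; drivers for physical devices can plug in the generic vfio_iommufd_physical_*() helpers rather than rolling their own. A sketch of the ops table and probe path (my_* names hypothetical, continuing the sketches above):

    #include <linux/vfio.h>

    static const struct vfio_device_ops my_ops = {
            .name           = "my-vfio-driver",
            .init           = my_vfio_init,
            .open_device    = my_open_device,       /* see the sketch below */
            .close_device   = my_close_device,
            .bind_iommufd   = vfio_iommufd_physical_bind,
            .unbind_iommufd = vfio_iommufd_physical_unbind,
            .attach_ioas    = vfio_iommufd_physical_attach_ioas,
            .detach_ioas    = vfio_iommufd_physical_detach_ioas,
    };

    static int my_probe(struct device *dev)
    {
            struct my_device *my = my_alloc(dev);
            int ret;

            if (IS_ERR(my))
                    return PTR_ERR(my);

            ret = vfio_register_group_dev(&my->vdev);
            if (ret) {
                    vfio_put_device(&my->vdev);
                    return ret;
            }
            dev_set_drvdata(dev, my);
            return 0;
    }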
339 * Decrement the device reference count and wait for the device to be
340 * removed. Open file descriptors for the device... */
341 void vfio_unregister_group_dev(struct vfio_device *device) in vfio_unregister_group_dev() argument
348 * Prevent new device opened by userspace via the in vfio_unregister_group_dev()
351 vfio_device_group_unregister(device); in vfio_unregister_group_dev()
355 * new device opened by userspace in the cdev path. in vfio_unregister_group_dev()
357 vfio_device_del(device); in vfio_unregister_group_dev()
359 vfio_device_put_registration(device); in vfio_unregister_group_dev()
360 rc = try_wait_for_completion(&device->comp); in vfio_unregister_group_dev()
362 if (device->ops->request) in vfio_unregister_group_dev()
363 device->ops->request(device, i++); in vfio_unregister_group_dev()
366 rc = wait_for_completion_timeout(&device->comp, in vfio_unregister_group_dev()
370 &device->comp, HZ * 10); in vfio_unregister_group_dev()
373 dev_warn(device->dev, in vfio_unregister_group_dev()
374 "Device is currently in use, task" in vfio_unregister_group_dev()
376 "blocked until device is released", in vfio_unregister_group_dev()
382 vfio_device_debugfs_exit(device); in vfio_unregister_group_dev()
384 vfio_device_remove_group(device); in vfio_unregister_group_dev()
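Teardown mirrors registration: vfio_unregister_group_dev() drops the registration reference and blocks until every open file descriptor is released, periodically invoking ops->request (when implemented) so the driver can ask its user to exit, and warning after the timeouts shown above. A sketch of the matching remove path:

    static void my_remove(struct device *dev)
    {
            struct my_device *my = dev_get_drvdata(dev);

            vfio_unregister_group_dev(&my->vdev);   /* may block on open FDs */
            vfio_put_device(&my->vdev);             /* drop the final reference */
    }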
389 void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm) in vfio_device_get_kvm_safe() argument
395 lockdep_assert_held(&device->dev_set->lock); in vfio_device_get_kvm_safe()
417 device->put_kvm = pfn; in vfio_device_get_kvm_safe()
418 device->kvm = kvm; in vfio_device_get_kvm_safe()
421 void vfio_device_put_kvm(struct vfio_device *device) in vfio_device_put_kvm() argument
423 lockdep_assert_held(&device->dev_set->lock); in vfio_device_put_kvm()
425 if (!device->kvm) in vfio_device_put_kvm()
428 if (WARN_ON(!device->put_kvm)) in vfio_device_put_kvm()
431 device->put_kvm(device->kvm); in vfio_device_put_kvm()
432 device->put_kvm = NULL; in vfio_device_put_kvm()
436 device->kvm = NULL; in vfio_device_put_kvm()
441 static bool vfio_assert_device_open(struct vfio_device *device) in vfio_assert_device_open() argument
443 return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); in vfio_assert_device_open()
447 vfio_allocate_device_file(struct vfio_device *device) in vfio_allocate_device_file() argument
455 df->device = device; in vfio_allocate_device_file()
463 struct vfio_device *device = df->device; in vfio_df_device_first_open() local
467 lockdep_assert_held(&device->dev_set->lock); in vfio_df_device_first_open()
469 if (!try_module_get(device->dev->driver->owner)) in vfio_df_device_first_open()
475 ret = vfio_device_group_use_iommu(device); in vfio_df_device_first_open()
479 if (device->ops->open_device) { in vfio_df_device_first_open()
480 ret = device->ops->open_device(device); in vfio_df_device_first_open()
490 vfio_device_group_unuse_iommu(device); in vfio_df_device_first_open()
492 module_put(device->dev->driver->owner); in vfio_df_device_first_open()
498 struct vfio_device *device = df->device; in vfio_df_device_last_close() local
501 lockdep_assert_held(&device->dev_set->lock); in vfio_df_device_last_close()
503 if (device->ops->close_device) in vfio_df_device_last_close()
504 device->ops->close_device(device); in vfio_df_device_last_close()
508 vfio_device_group_unuse_iommu(device); in vfio_df_device_last_close()
509 module_put(device->dev->driver->owner); in vfio_df_device_last_close()
514 struct vfio_device *device = df->device; in vfio_df_open() local
517 lockdep_assert_held(&device->dev_set->lock); in vfio_df_open()
520 * Only the group path allows the device to be opened multiple in vfio_df_open()
521 * times. The device cdev path doesn't have a secure way for it. in vfio_df_open()
523 if (device->open_count != 0 && !df->group) in vfio_df_open()
526 device->open_count++; in vfio_df_open()
527 if (device->open_count == 1) { in vfio_df_open()
530 device->open_count--; in vfio_df_open()
538 struct vfio_device *device = df->device; in vfio_df_close() local
540 lockdep_assert_held(&device->dev_set->lock); in vfio_df_close()
542 vfio_assert_device_open(device); in vfio_df_close()
543 if (device->open_count == 1) in vfio_df_close()
545 device->open_count--; in vfio_df_close()
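Only the group path may open a device more than once, so ops->open_device fires exactly when open_count goes from 0 to 1 and ops->close_device when it returns to 0, both under the dev_set lock. A hypothetical pair:

    static int my_open_device(struct vfio_device *vdev)
    {
            struct my_device *my = container_of(vdev, struct my_device, vdev);

            /* first user: power up the function, enable interrupts, ... */
            return my_hw_enable(my);        /* hypothetical helper */
    }

    static void my_close_device(struct vfio_device *vdev)
    {
            struct my_device *my = container_of(vdev, struct my_device, vdev);

            /* last user gone: quiesce DMA and interrupts */
            my_hw_disable(my);              /* hypothetical helper */
    }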
552 static inline int vfio_device_pm_runtime_get(struct vfio_device *device) in vfio_device_pm_runtime_get() argument
554 struct device *dev = device->dev; in vfio_device_pm_runtime_get()
573 static inline void vfio_device_pm_runtime_put(struct vfio_device *device) in vfio_device_pm_runtime_put() argument
575 struct device *dev = device->dev; in vfio_device_pm_runtime_put()
582 * VFIO Device fd
587 struct vfio_device *device = df->device; in vfio_device_fops_release() local
594 vfio_device_put_registration(device); in vfio_device_fops_release()
603 * @cur_fsm - The current state the device is in
616 int vfio_mig_get_next_state(struct vfio_device *device, in vfio_mig_get_next_state() argument
788 (state_flags_table[cur_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
793 (state_flags_table[new_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
803 while ((state_flags_table[*next_fsm] & device->migration_flags) != in vfio_mig_get_next_state()
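vfio_mig_get_next_state() returns the next single hop along a legal path from cur_fsm to new_fsm, rejecting any endpoint whose arcs the device did not advertise in migration_flags. Drivers typically loop over it from their migration_set_state() callback; a hedged sketch modeled on that pattern (my_step_state() and the mig_state field are hypothetical):

    #include <linux/file.h>
    #include <linux/vfio.h>

    static struct file *
    my_migration_set_state(struct vfio_device *vdev,
                           enum vfio_device_mig_state new_state)
    {
            struct my_device *my = container_of(vdev, struct my_device, vdev);
            enum vfio_device_mig_state next_state;
            struct file *res = NULL;
            int ret;

            /* step one legal FSM arc at a time until the target is reached */
            while (my->mig_state != new_state) {
                    ret = vfio_mig_get_next_state(vdev, my->mig_state,
                                                  new_state, &next_state);
                    if (ret)
                            return ERR_PTR(ret);

                    res = my_step_state(my, next_state);    /* hypothetical */
                    if (IS_ERR(res))
                            return res;
                    my->mig_state = next_state;

                    /* a data-transfer FD may only come from the final arc */
                    if (WARN_ON(res && new_state != my->mig_state)) {
                            fput(res);
                            return ERR_PTR(-EINVAL);
                    }
            }
            return res;
    }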
842 vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device, in vfio_ioctl_device_feature_mig_device_state() argument
852 if (!device->mig_ops) in vfio_ioctl_device_feature_mig_device_state()
868 ret = device->mig_ops->migration_get_state(device, in vfio_ioctl_device_feature_mig_device_state()
877 filp = device->mig_ops->migration_set_state(device, mig.device_state); in vfio_ioctl_device_feature_mig_device_state()
892 vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device, in vfio_ioctl_device_feature_migration_data_size() argument
900 if (!device->mig_ops) in vfio_ioctl_device_feature_migration_data_size()
908 ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length); in vfio_ioctl_device_feature_migration_data_size()
919 static int vfio_ioctl_device_feature_migration(struct vfio_device *device, in vfio_ioctl_device_feature_migration() argument
924 .flags = device->migration_flags, in vfio_ioctl_device_feature_migration()
928 if (!device->mig_ops) in vfio_ioctl_device_feature_migration()
1002 vfio_ioctl_device_feature_logging_start(struct vfio_device *device, in vfio_ioctl_device_feature_logging_start() argument
1018 if (!device->log_ops) in vfio_ioctl_device_feature_logging_start()
1071 ret = device->log_ops->log_start(device, &root, nnodes, in vfio_ioctl_device_feature_logging_start()
1078 device->log_ops->log_stop(device); in vfio_ioctl_device_feature_logging_start()
1087 vfio_ioctl_device_feature_logging_stop(struct vfio_device *device, in vfio_ioctl_device_feature_logging_stop() argument
1093 if (!device->log_ops) in vfio_ioctl_device_feature_logging_stop()
1101 return device->log_ops->log_stop(device); in vfio_ioctl_device_feature_logging_stop()
1108 struct vfio_device *device = opaque; in vfio_device_log_read_and_clear() local
1110 return device->log_ops->log_read_and_clear(device, iova, length, iter); in vfio_device_log_read_and_clear()
1114 vfio_ioctl_device_feature_logging_report(struct vfio_device *device, in vfio_ioctl_device_feature_logging_report() argument
1126 if (!device->log_ops) in vfio_ioctl_device_feature_logging_report()
1151 ret = iova_bitmap_for_each(iter, device, in vfio_ioctl_device_feature_logging_report()
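All three logging features above are gated on device->log_ops: log_start opens a dirty-tracking session over a set of IOVA ranges, log_stop tears it down (also on a failed start, per the error path above), and log_read_and_clear is driven once per user-supplied range through the iova_bitmap iterator. A wiring sketch, with the my_* implementations hypothetical:

    #include <linux/vfio.h>

    static const struct vfio_log_ops my_log_ops = {
            .log_start          = my_log_start,
            .log_stop           = my_log_stop,
            .log_read_and_clear = my_log_read_and_clear,
    };

    /* hypothetical: hook the table up from ->init(), before registration */
    static int my_vfio_init_logging(struct vfio_device *vdev)
    {
            vdev->log_ops = &my_log_ops;
            return 0;
    }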
1158 static int vfio_ioctl_device_feature(struct vfio_device *device, in vfio_ioctl_device_feature() argument
1185 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1189 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1193 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1197 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1201 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1205 device, feature.flags, arg->data, in vfio_ioctl_device_feature()
1208 if (unlikely(!device->ops->device_feature)) in vfio_ioctl_device_feature()
1210 return device->ops->device_feature(device, feature.flags, in vfio_ioctl_device_feature()
1220 struct vfio_device *device = df->device; in vfio_device_fops_unl_ioctl() local
1231 ret = vfio_device_pm_runtime_get(device); in vfio_device_fops_unl_ioctl()
1250 ret = vfio_ioctl_device_feature(device, uptr); in vfio_device_fops_unl_ioctl()
1254 if (unlikely(!device->ops->ioctl)) in vfio_device_fops_unl_ioctl()
1257 ret = device->ops->ioctl(device, cmd, arg); in vfio_device_fops_unl_ioctl()
1261 vfio_device_pm_runtime_put(device); in vfio_device_fops_unl_ioctl()
1269 struct vfio_device *device = df->device; in vfio_device_fops_read() local
1275 if (unlikely(!device->ops->read)) in vfio_device_fops_read()
1278 return device->ops->read(device, buf, count, ppos); in vfio_device_fops_read()
1286 struct vfio_device *device = df->device; in vfio_device_fops_write() local
1292 if (unlikely(!device->ops->write)) in vfio_device_fops_write()
1295 return device->ops->write(device, buf, count, ppos); in vfio_device_fops_write()
1301 struct vfio_device *device = df->device; in vfio_device_fops_mmap() local
1307 if (unlikely(!device->ops->mmap)) in vfio_device_fops_mmap()
1310 return device->ops->mmap(device, vma); in vfio_device_fops_mmap()
1330 return df->device; in vfio_device_from_file()
1335 * @file: VFIO group file or VFIO device file
1347 * @file: VFIO group file or VFIO device file
1355 struct vfio_device *device; in vfio_file_enforced_coherent() local
1362 device = vfio_device_from_file(file); in vfio_file_enforced_coherent()
1363 if (device) in vfio_file_enforced_coherent()
1364 return device_iommu_capable(device->dev, in vfio_file_enforced_coherent()
1378 * iommufd successfully in the vfio device cdev path. in vfio_device_file_set_kvm()
1387 * @file: VFIO group file or VFIO device file
1390 * When a VFIO device is first opened the KVM will be available in
1391 * device->kvm if one was associated with the file.
1534 * @device [in] : device
1545 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, in vfio_pin_pages() argument
1548 /* group->container cannot change while a vfio device is open */ in vfio_pin_pages()
1549 if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device))) in vfio_pin_pages()
1551 if (!device->ops->dma_unmap) in vfio_pin_pages()
1553 if (vfio_device_has_container(device)) in vfio_pin_pages()
1554 return vfio_device_container_pin_pages(device, iova, in vfio_pin_pages()
1556 if (device->iommufd_access) { in vfio_pin_pages()
1568 device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE), in vfio_pin_pages()
1581 * @device [in] : device
1586 void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) in vfio_unpin_pages() argument
1588 if (WARN_ON(!vfio_assert_device_open(device))) in vfio_unpin_pages()
1590 if (WARN_ON(!device->ops->dma_unmap)) in vfio_unpin_pages()
1593 if (vfio_device_has_container(device)) { in vfio_unpin_pages()
1594 vfio_device_container_unpin_pages(device, iova, npage); in vfio_unpin_pages()
1597 if (device->iommufd_access) { in vfio_unpin_pages()
1600 iommufd_access_unpin_pages(device->iommufd_access, in vfio_unpin_pages()
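vfio_pin_pages() and vfio_unpin_pages() work in PAGE_SIZE units against whichever backend the user bound (legacy container or iommufd access); the device must be open, and must implement ops->dma_unmap so the core can invalidate pins when the mapping goes away. A hedged usage sketch:

    #include <linux/iommu.h>
    #include <linux/vfio.h>

    static int my_access_guest_page(struct vfio_device *vdev, dma_addr_t iova)
    {
            struct page *page;
            int ret;

            /* pin a single page; returns the number pinned or -errno */
            ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
            if (ret != 1)
                    return ret < 0 ? ret : -EFAULT;

            /* ... mediate the device's DMA through @page ... */

            vfio_unpin_pages(vdev, iova, 1);
            return 0;
    }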
1610 * behalf of the device.
1616 * not a real device DMA, it is not necessary to pin the user space memory.
1618 * @device [in] : VFIO device
1625 int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, in vfio_dma_rw() argument
1628 if (!data || len <= 0 || !vfio_assert_device_open(device)) in vfio_dma_rw()
1631 if (vfio_device_has_container(device)) in vfio_dma_rw()
1632 return vfio_device_container_dma_rw(device, iova, in vfio_dma_rw()
1635 if (device->iommufd_access) { in vfio_dma_rw()
1646 return iommufd_access_rw(device->iommufd_access, iova, data, in vfio_dma_rw()
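vfio_dma_rw() copies through the CPU instead of pinning, which suits one-off accesses such as fetching a descriptor the device points at. A sketch:

    #include <linux/vfio.h>

    /* read @len bytes at guest IOVA @iova into @buf (write = false) */
    static int my_read_guest(struct vfio_device *vdev, dma_addr_t iova,
                             void *buf, size_t len)
    {
            return vfio_dma_rw(vdev, iova, buf, len, false);
    }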