Lines matching +full:ctx +full:- +full:asid — drivers/vhost/vdpa.c (Linux kernel vhost-vdpa driver), each entry shown as "<line> <code> in <function>()".
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018-2020 Intel Corporation.
71 u64 last, u32 asid);
77 return as->id; in iotlb_to_asid()
80 static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid) in asid_to_as() argument
82 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS]; in asid_to_as()
86 if (as->id == asid) in asid_to_as()
92 static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid) in asid_to_iotlb() argument
94 struct vhost_vdpa_as *as = asid_to_as(v, asid); in asid_to_iotlb()
99 return &as->iotlb; in asid_to_iotlb()
102 static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid) in vhost_vdpa_alloc_as() argument
104 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS]; in vhost_vdpa_alloc_as()
107 if (asid_to_as(v, asid)) in vhost_vdpa_alloc_as()
110 if (asid >= v->vdpa->nas) in vhost_vdpa_alloc_as()
117 vhost_iotlb_init(&as->iotlb, 0, 0); in vhost_vdpa_alloc_as()
118 as->id = asid; in vhost_vdpa_alloc_as()
119 hlist_add_head(&as->hash_link, head); in vhost_vdpa_alloc_as()
125 u32 asid) in vhost_vdpa_find_alloc_as() argument
127 struct vhost_vdpa_as *as = asid_to_as(v, asid); in vhost_vdpa_find_alloc_as()
132 return vhost_vdpa_alloc_as(v, asid); in vhost_vdpa_find_alloc_as()
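
The asid_to_as()/vhost_vdpa_find_alloc_as() matches above implement a small fixed-bucket hash table: the ASID is reduced modulo VHOST_VDPA_IOTLB_BUCKETS to pick a bucket, whose chain is then walked for an exact id match. A minimal userspace sketch of the same find-or-allocate pattern (the bucket count and all names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define N_BUCKETS 16            /* stand-in for VHOST_VDPA_IOTLB_BUCKETS */

    struct as_entry {
        uint32_t id;                /* address space ID (ASID) */
        struct as_entry *next;      /* singly linked bucket chain */
    };

    static struct as_entry *buckets[N_BUCKETS];

    /* Mirror of asid_to_as(): walk the bucket the ASID hashes into. */
    static struct as_entry *find_as(uint32_t asid)
    {
        struct as_entry *as;

        for (as = buckets[asid % N_BUCKETS]; as; as = as->next)
            if (as->id == asid)
                return as;
        return NULL;
    }

    /* Mirror of vhost_vdpa_find_alloc_as(): return existing entry or allocate. */
    static struct as_entry *find_alloc_as(uint32_t asid)
    {
        struct as_entry *as = find_as(asid);

        if (as)
            return as;
        as = calloc(1, sizeof(*as));
        if (!as)
            return NULL;
        as->id = asid;
        as->next = buckets[asid % N_BUCKETS];   /* hlist_add_head() analogue */
        buckets[asid % N_BUCKETS] = as;
        return as;
    }

    int main(void)
    {
        find_alloc_as(3);
        printf("asid 3: %s\n", find_as(3) ? "present" : "absent");
        printf("asid 19: %s\n", find_as(19) ? "present" : "absent"); /* same bucket, different id */
        return 0;
    }
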
135 static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid) in vhost_vdpa_reset_map() argument
137 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_reset_map()
138 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_reset_map()
140 if (ops->reset_map) in vhost_vdpa_reset_map()
141 ops->reset_map(vdpa, asid); in vhost_vdpa_reset_map()
144 static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid) in vhost_vdpa_remove_as() argument
146 struct vhost_vdpa_as *as = asid_to_as(v, asid); in vhost_vdpa_remove_as()
149 return -EINVAL; in vhost_vdpa_remove_as()
151 hlist_del(&as->hash_link); in vhost_vdpa_remove_as()
152 vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid); in vhost_vdpa_remove_as()
160 vhost_vdpa_reset_map(v, asid); in vhost_vdpa_remove_as()
170 struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev); in handle_vq_kick()
171 const struct vdpa_config_ops *ops = v->vdpa->config; in handle_vq_kick()
173 ops->kick_vq(v->vdpa, vq - v->vqs); in handle_vq_kick()
179 struct eventfd_ctx *call_ctx = vq->call_ctx.ctx; in vhost_vdpa_virtqueue_cb()
190 struct eventfd_ctx *config_ctx = v->config_ctx; in vhost_vdpa_config_cb()
200 struct vhost_virtqueue *vq = &v->vqs[qid]; in vhost_vdpa_setup_vq_irq()
201 const struct vdpa_config_ops *ops = v->vdpa->config; in vhost_vdpa_setup_vq_irq()
202 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_setup_vq_irq()
205 if (!ops->get_vq_irq) in vhost_vdpa_setup_vq_irq()
208 irq = ops->get_vq_irq(vdpa, qid); in vhost_vdpa_setup_vq_irq()
212 irq_bypass_unregister_producer(&vq->call_ctx.producer); in vhost_vdpa_setup_vq_irq()
213 if (!vq->call_ctx.ctx) in vhost_vdpa_setup_vq_irq()
216 vq->call_ctx.producer.token = vq->call_ctx.ctx; in vhost_vdpa_setup_vq_irq()
217 vq->call_ctx.producer.irq = irq; in vhost_vdpa_setup_vq_irq()
218 ret = irq_bypass_register_producer(&vq->call_ctx.producer); in vhost_vdpa_setup_vq_irq()
220 dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n", in vhost_vdpa_setup_vq_irq()
221 qid, vq->call_ctx.producer.token, ret); in vhost_vdpa_setup_vq_irq()
226 struct vhost_virtqueue *vq = &v->vqs[qid]; in vhost_vdpa_unsetup_vq_irq()
228 irq_bypass_unregister_producer(&vq->call_ctx.producer); in vhost_vdpa_unsetup_vq_irq()
233 struct vdpa_device *vdpa = v->vdpa; in _compat_vdpa_reset()
236 v->suspended = false; in _compat_vdpa_reset()
238 if (v->vdev.vqs) { in _compat_vdpa_reset()
239 flags |= !vhost_backend_has_feature(v->vdev.vqs[0], in _compat_vdpa_reset()
249 v->in_batch = 0; in vhost_vdpa_reset()
255 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_bind_mm()
256 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_bind_mm()
258 if (!vdpa->use_va || !ops->bind_mm) in vhost_vdpa_bind_mm()
261 return ops->bind_mm(vdpa, v->vdev.mm); in vhost_vdpa_bind_mm()
266 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_unbind_mm()
267 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_unbind_mm()
269 if (!vdpa->use_va || !ops->unbind_mm) in vhost_vdpa_unbind_mm()
272 ops->unbind_mm(vdpa); in vhost_vdpa_unbind_mm()
277 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_device_id()
278 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_device_id()
281 device_id = ops->get_device_id(vdpa); in vhost_vdpa_get_device_id()
284 return -EFAULT; in vhost_vdpa_get_device_id()
291 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_status()
292 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_status()
295 status = ops->get_status(vdpa); in vhost_vdpa_get_status()
298 return -EFAULT; in vhost_vdpa_get_status()
305 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_status()
306 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_set_status()
308 u32 nvqs = v->nvqs; in vhost_vdpa_set_status()
313 return -EFAULT; in vhost_vdpa_set_status()
315 status_old = ops->get_status(vdpa); in vhost_vdpa_set_status()
322 return -EINVAL; in vhost_vdpa_set_status()
345 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_config_validate()
346 size_t size = vdpa->config->get_config_size(vdpa); in vhost_vdpa_config_validate()
348 if (c->len == 0 || c->off > size) in vhost_vdpa_config_validate()
349 return -EINVAL; in vhost_vdpa_config_validate()
351 if (c->len > size - c->off) in vhost_vdpa_config_validate()
352 return -E2BIG; in vhost_vdpa_config_validate()
360 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_config()
366 return -EFAULT; in vhost_vdpa_get_config()
368 return -EINVAL; in vhost_vdpa_get_config()
371 return -ENOMEM; in vhost_vdpa_get_config()
375 if (copy_to_user(c->buf, buf, config.len)) { in vhost_vdpa_get_config()
377 return -EFAULT; in vhost_vdpa_get_config()
387 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_config()
393 return -EFAULT; in vhost_vdpa_set_config()
395 return -EINVAL; in vhost_vdpa_set_config()
397 buf = vmemdup_user(c->buf, config.len); in vhost_vdpa_set_config()
409 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_can_suspend()
410 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_can_suspend()
412 return ops->suspend; in vhost_vdpa_can_suspend()
417 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_can_resume()
418 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_can_resume()
420 return ops->resume; in vhost_vdpa_can_resume()
425 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_has_desc_group()
426 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_has_desc_group()
428 return ops->get_vq_desc_group; in vhost_vdpa_has_desc_group()
433 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_features()
434 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_features()
437 features = ops->get_device_features(vdpa); in vhost_vdpa_get_features()
440 return -EFAULT; in vhost_vdpa_get_features()
447 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_backend_features()
448 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_backend_features()
450 if (!ops->get_backend_features) in vhost_vdpa_get_backend_features()
453 return ops->get_backend_features(vdpa); in vhost_vdpa_get_backend_features()
458 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_has_persistent_map()
459 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_has_persistent_map()
461 return (!ops->set_map && !ops->dma_map) || ops->reset_map || in vhost_vdpa_has_persistent_map()
467 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_features()
468 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_set_features()
469 struct vhost_dev *d = &v->vdev; in vhost_vdpa_set_features()
478 if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK) in vhost_vdpa_set_features()
479 return -EBUSY; in vhost_vdpa_set_features()
482 return -EFAULT; in vhost_vdpa_set_features()
485 return -EINVAL; in vhost_vdpa_set_features()
488 actual_features = ops->get_driver_features(vdpa); in vhost_vdpa_set_features()
489 for (i = 0; i < d->nvqs; ++i) { in vhost_vdpa_set_features()
490 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_vdpa_set_features()
492 mutex_lock(&vq->mutex); in vhost_vdpa_set_features()
493 vq->acked_features = actual_features; in vhost_vdpa_set_features()
494 mutex_unlock(&vq->mutex); in vhost_vdpa_set_features()
502 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_vring_num()
503 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_vring_num()
506 num = ops->get_vq_num_max(vdpa); in vhost_vdpa_get_vring_num()
509 return -EFAULT; in vhost_vdpa_get_vring_num()
516 if (v->config_ctx) { in vhost_vdpa_config_put()
517 eventfd_ctx_put(v->config_ctx); in vhost_vdpa_config_put()
518 v->config_ctx = NULL; in vhost_vdpa_config_put()
526 struct eventfd_ctx *ctx; in vhost_vdpa_set_config_call() local
531 return -EFAULT; in vhost_vdpa_set_config_call()
533 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); in vhost_vdpa_set_config_call()
534 swap(ctx, v->config_ctx); in vhost_vdpa_set_config_call()
536 if (!IS_ERR_OR_NULL(ctx)) in vhost_vdpa_set_config_call()
537 eventfd_ctx_put(ctx); in vhost_vdpa_set_config_call()
539 if (IS_ERR(v->config_ctx)) { in vhost_vdpa_set_config_call()
540 long ret = PTR_ERR(v->config_ctx); in vhost_vdpa_set_config_call()
542 v->config_ctx = NULL; in vhost_vdpa_set_config_call()
546 v->vdpa->config->set_config_cb(v->vdpa, &cb); in vhost_vdpa_set_config_call()
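
vhost_vdpa_set_config_call() follows a swap-then-release idiom: the new eventfd context (or error pointer) is swapped into v->config_ctx before the old one is put, so the field always holds something coherent. A loose userspace analogue using plain eventfds; VHOST_FILE_UNBIND and the kernel's error-pointer handling are simplified here to an fd of -1 and an early return:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    static int config_fd = -1;          /* analogue of v->config_ctx */

    /* Install a new notification eventfd; -1 unbinds (VHOST_FILE_UNBIND analogue). */
    static int set_config_call(int fd)
    {
        int newfd = (fd < 0) ? -1 : dup(fd);  /* take our own reference, like eventfd_ctx_fdget() */
        int oldfd;

        if (fd >= 0 && newfd < 0)
            return -1;                  /* dup failed; keep the old binding */

        oldfd = config_fd;              /* swap(ctx, v->config_ctx) analogue */
        config_fd = newfd;
        if (oldfd >= 0)
            close(oldfd);               /* eventfd_ctx_put() analogue */
        return 0;
    }

    int main(void)
    {
        int efd = eventfd(0, 0);

        if (efd < 0)
            return 1;
        set_config_call(efd);           /* bind */
        printf("bound config fd %d\n", config_fd);
        set_config_call(-1);            /* unbind; old reference released */
        close(efd);
        return 0;
    }
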
554 .first = v->range.first, in vhost_vdpa_get_iova_range()
555 .last = v->range.last, in vhost_vdpa_get_iova_range()
559 return -EFAULT; in vhost_vdpa_get_iova_range()
565 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_config_size()
566 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_get_config_size()
569 size = ops->get_config_size(vdpa); in vhost_vdpa_get_config_size()
572 return -EFAULT; in vhost_vdpa_get_config_size()
579 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_get_vqs_count()
581 if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs))) in vhost_vdpa_get_vqs_count()
582 return -EFAULT; in vhost_vdpa_get_vqs_count()
594 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_suspend()
595 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_suspend()
598 if (!ops->suspend) in vhost_vdpa_suspend()
599 return -EOPNOTSUPP; in vhost_vdpa_suspend()
601 ret = ops->suspend(vdpa); in vhost_vdpa_suspend()
603 v->suspended = true; in vhost_vdpa_suspend()
614 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_resume()
615 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_resume()
618 if (!ops->resume) in vhost_vdpa_resume()
619 return -EOPNOTSUPP; in vhost_vdpa_resume()
621 ret = ops->resume(vdpa); in vhost_vdpa_resume()
623 v->suspended = false; in vhost_vdpa_resume()
631 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_vring_ioctl()
632 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_vring_ioctl()
644 if (idx >= v->nvqs) in vhost_vdpa_vring_ioctl()
645 return -ENOBUFS; in vhost_vdpa_vring_ioctl()
647 idx = array_index_nospec(idx, v->nvqs); in vhost_vdpa_vring_ioctl()
648 vq = &v->vqs[idx]; in vhost_vdpa_vring_ioctl()
653 return -EFAULT; in vhost_vdpa_vring_ioctl()
654 ops->set_vq_ready(vdpa, idx, s.num); in vhost_vdpa_vring_ioctl()
657 if (!ops->get_vq_group) in vhost_vdpa_vring_ioctl()
658 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
660 s.num = ops->get_vq_group(vdpa, idx); in vhost_vdpa_vring_ioctl()
661 if (s.num >= vdpa->ngroups) in vhost_vdpa_vring_ioctl()
662 return -EIO; in vhost_vdpa_vring_ioctl()
664 return -EFAULT; in vhost_vdpa_vring_ioctl()
668 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
670 s.num = ops->get_vq_desc_group(vdpa, idx); in vhost_vdpa_vring_ioctl()
671 if (s.num >= vdpa->ngroups) in vhost_vdpa_vring_ioctl()
672 return -EIO; in vhost_vdpa_vring_ioctl()
674 return -EFAULT; in vhost_vdpa_vring_ioctl()
678 return -EFAULT; in vhost_vdpa_vring_ioctl()
679 if (s.num >= vdpa->nas) in vhost_vdpa_vring_ioctl()
680 return -EINVAL; in vhost_vdpa_vring_ioctl()
681 if (!ops->set_group_asid) in vhost_vdpa_vring_ioctl()
682 return -EOPNOTSUPP; in vhost_vdpa_vring_ioctl()
683 return ops->set_group_asid(vdpa, idx, s.num); in vhost_vdpa_vring_ioctl()
685 r = ops->get_vq_state(v->vdpa, idx, &vq_state); in vhost_vdpa_vring_ioctl()
690 vq->last_avail_idx = vq_state.packed.last_avail_idx | in vhost_vdpa_vring_ioctl()
692 vq->last_used_idx = vq_state.packed.last_used_idx | in vhost_vdpa_vring_ioctl()
695 vq->last_avail_idx = vq_state.split.avail_index; in vhost_vdpa_vring_ioctl()
700 r = vhost_vring_ioctl(&v->vdev, cmd, argp); in vhost_vdpa_vring_ioctl()
706 if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended) in vhost_vdpa_vring_ioctl()
707 return -EINVAL; in vhost_vdpa_vring_ioctl()
709 if (ops->set_vq_address(vdpa, idx, in vhost_vdpa_vring_ioctl()
710 (u64)(uintptr_t)vq->desc, in vhost_vdpa_vring_ioctl()
711 (u64)(uintptr_t)vq->avail, in vhost_vdpa_vring_ioctl()
712 (u64)(uintptr_t)vq->used)) in vhost_vdpa_vring_ioctl()
713 r = -EINVAL; in vhost_vdpa_vring_ioctl()
717 if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended) in vhost_vdpa_vring_ioctl()
718 return -EINVAL; in vhost_vdpa_vring_ioctl()
721 vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff; in vhost_vdpa_vring_ioctl()
722 vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000); in vhost_vdpa_vring_ioctl()
723 vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff; in vhost_vdpa_vring_ioctl()
724 vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000); in vhost_vdpa_vring_ioctl()
726 vq_state.split.avail_index = vq->last_avail_idx; in vhost_vdpa_vring_ioctl()
728 r = ops->set_vq_state(vdpa, idx, &vq_state); in vhost_vdpa_vring_ioctl()
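
For packed virtqueues, the GET/SET_VRING_BASE paths above fold a 15-bit ring index and a 1-bit wrap counter into one 16-bit value: bits 0-14 carry the index (the & 0x7fff masks), bit 15 (0x8000) the counter. A self-contained round-trip check of that encoding:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 15-bit ring index and wrap counter the way vhost-vdpa stores them. */
    static uint16_t pack_idx(uint16_t idx, bool wrap)
    {
        return (idx & 0x7fff) | ((uint16_t)wrap << 15);
    }

    int main(void)
    {
        uint16_t v = pack_idx(0x1234, true);

        assert((v & 0x7fff) == 0x1234);   /* index, as in "& 0x7fff" above */
        assert(!!(v & 0x8000) == 1);      /* wrap counter, as in "!!(... & 0x8000)" */
        printf("packed state 0x%04x -> idx 0x%04x wrap %d\n",
               v, v & 0x7fff, !!(v & 0x8000));
        return 0;
    }
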
732 if (vq->call_ctx.ctx) { in vhost_vdpa_vring_ioctl()
735 cb.trigger = vq->call_ctx.ctx; in vhost_vdpa_vring_ioctl()
741 ops->set_vq_cb(vdpa, idx, &cb); in vhost_vdpa_vring_ioctl()
746 ops->set_vq_num(vdpa, idx, vq->num); in vhost_vdpa_vring_ioctl()
756 struct vhost_vdpa *v = filep->private_data; in vhost_vdpa_unlocked_ioctl()
757 struct vhost_dev *d = &v->vdev; in vhost_vdpa_unlocked_ioctl()
765 return -EFAULT; in vhost_vdpa_unlocked_ioctl()
772 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
775 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
778 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
781 return -EINVAL; in vhost_vdpa_unlocked_ioctl()
784 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
787 return -EOPNOTSUPP; in vhost_vdpa_unlocked_ioctl()
788 vhost_set_backend_features(&v->vdev, features); in vhost_vdpa_unlocked_ioctl()
792 mutex_lock(&d->mutex); in vhost_vdpa_unlocked_ioctl()
820 if (copy_to_user(argp, &v->vdpa->ngroups, in vhost_vdpa_unlocked_ioctl()
821 sizeof(v->vdpa->ngroups))) in vhost_vdpa_unlocked_ioctl()
822 r = -EFAULT; in vhost_vdpa_unlocked_ioctl()
825 if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas))) in vhost_vdpa_unlocked_ioctl()
826 r = -EFAULT; in vhost_vdpa_unlocked_ioctl()
830 r = -ENOIOCTLCMD; in vhost_vdpa_unlocked_ioctl()
847 r = -EFAULT; in vhost_vdpa_unlocked_ioctl()
865 r = vhost_dev_ioctl(&v->vdev, cmd, argp); in vhost_vdpa_unlocked_ioctl()
866 if (r == -ENOIOCTLCMD) in vhost_vdpa_unlocked_ioctl()
882 mutex_unlock(&d->mutex); in vhost_vdpa_unlocked_ioctl()
886 struct vhost_iotlb_map *map, u32 asid) in vhost_vdpa_general_unmap() argument
888 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_general_unmap()
889 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_general_unmap()
890 if (ops->dma_map) { in vhost_vdpa_general_unmap()
891 ops->dma_unmap(vdpa, asid, map->start, map->size); in vhost_vdpa_general_unmap()
892 } else if (ops->set_map == NULL) { in vhost_vdpa_general_unmap()
893 iommu_unmap(v->domain, map->start, map->size); in vhost_vdpa_general_unmap()
898 u64 start, u64 last, u32 asid) in vhost_vdpa_pa_unmap() argument
900 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_pa_unmap()
906 pinned = PFN_DOWN(map->size); in vhost_vdpa_pa_unmap()
907 for (pfn = PFN_DOWN(map->addr); in vhost_vdpa_pa_unmap()
908 pinned > 0; pfn++, pinned--) { in vhost_vdpa_pa_unmap()
910 if (map->perm & VHOST_ACCESS_WO) in vhost_vdpa_pa_unmap()
914 atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm); in vhost_vdpa_pa_unmap()
915 vhost_vdpa_general_unmap(v, map, asid); in vhost_vdpa_pa_unmap()
921 u64 start, u64 last, u32 asid) in vhost_vdpa_va_unmap() argument
927 map_file = (struct vdpa_map_file *)map->opaque; in vhost_vdpa_va_unmap()
928 fput(map_file->file); in vhost_vdpa_va_unmap()
930 vhost_vdpa_general_unmap(v, map, asid); in vhost_vdpa_va_unmap()
937 u64 last, u32 asid) in vhost_vdpa_iotlb_unmap() argument
939 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_iotlb_unmap()
941 if (vdpa->use_va) in vhost_vdpa_iotlb_unmap()
942 return vhost_vdpa_va_unmap(v, iotlb, start, last, asid); in vhost_vdpa_iotlb_unmap()
944 return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid); in vhost_vdpa_iotlb_unmap()
972 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_map()
973 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_map()
974 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_map()
975 u32 asid = iotlb_to_asid(iotlb); in vhost_vdpa_map() local
978 r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1, in vhost_vdpa_map()
983 if (ops->dma_map) { in vhost_vdpa_map()
984 r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque); in vhost_vdpa_map()
985 } else if (ops->set_map) { in vhost_vdpa_map()
986 if (!v->in_batch) in vhost_vdpa_map()
987 r = ops->set_map(vdpa, asid, iotlb); in vhost_vdpa_map()
989 r = iommu_map(v->domain, iova, pa, size, in vhost_vdpa_map()
994 vhost_iotlb_del_range(iotlb, iova, iova + size - 1); in vhost_vdpa_map()
998 if (!vdpa->use_va) in vhost_vdpa_map()
999 atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm); in vhost_vdpa_map()
1008 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_unmap()
1009 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_unmap()
1010 u32 asid = iotlb_to_asid(iotlb); in vhost_vdpa_unmap() local
1012 vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid); in vhost_vdpa_unmap()
1014 if (ops->set_map) { in vhost_vdpa_unmap()
1015 if (!v->in_batch) in vhost_vdpa_unmap()
1016 ops->set_map(vdpa, asid, iotlb); in vhost_vdpa_unmap()
1025 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_va_map()
1031 mmap_read_lock(dev->mm); in vhost_vdpa_va_map()
1034 vma = find_vma(dev->mm, uaddr); in vhost_vdpa_va_map()
1036 ret = -EINVAL; in vhost_vdpa_va_map()
1039 map_size = min(size, vma->vm_end - uaddr); in vhost_vdpa_va_map()
1040 if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) && in vhost_vdpa_va_map()
1041 !(vma->vm_flags & (VM_IO | VM_PFNMAP)))) in vhost_vdpa_va_map()
1046 ret = -ENOMEM; in vhost_vdpa_va_map()
1049 offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start; in vhost_vdpa_va_map()
1050 map_file->offset = offset; in vhost_vdpa_va_map()
1051 map_file->file = get_file(vma->vm_file); in vhost_vdpa_va_map()
1055 fput(map_file->file); in vhost_vdpa_va_map()
1060 size -= map_size; in vhost_vdpa_va_map()
1065 vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova); in vhost_vdpa_va_map()
1067 mmap_read_unlock(dev->mm); in vhost_vdpa_va_map()
1076 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_pa_map()
1089 return -ENOMEM; in vhost_vdpa_pa_map()
1096 ret = -EINVAL; in vhost_vdpa_pa_map()
1100 mmap_read_lock(dev->mm); in vhost_vdpa_pa_map()
1103 if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) { in vhost_vdpa_pa_map()
1104 ret = -ENOMEM; in vhost_vdpa_pa_map()
1121 ret = -ENOMEM; in vhost_vdpa_pa_map()
1136 csize = PFN_PHYS(last_pfn - map_pfn + 1); in vhost_vdpa_pa_map()
1150 pinned - i); in vhost_vdpa_pa_map()
1163 npages -= pinned; in vhost_vdpa_pa_map()
1167 ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1), in vhost_vdpa_pa_map()
1190 mmap_read_unlock(dev->mm); in vhost_vdpa_pa_map()
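
vhost_vdpa_pa_map() charges pinned pages against the process's RLIMIT_MEMLOCK up front: the request fails with -ENOMEM when the new page count plus mm->pinned_vm would exceed the limit. A rough userspace sketch of that accounting check, using the real rlimit (the already-pinned counter is a stand-in for pinned_vm):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    /* Would pinning `npages` more pages exceed RLIMIT_MEMLOCK, given what is
     * already pinned?  Mirrors the check in vhost_vdpa_pa_map(). */
    static int may_pin(uint64_t npages, uint64_t already_pinned)
    {
        struct rlimit rl;
        uint64_t lock_limit;

        if (getrlimit(RLIMIT_MEMLOCK, &rl))
            return 0;
        /* PFN_DOWN(rlimit(RLIMIT_MEMLOCK)) analogue: bytes -> pages */
        lock_limit = rl.rlim_cur / (uint64_t)sysconf(_SC_PAGESIZE);
        return npages + already_pinned <= lock_limit;
    }

    int main(void)
    {
        printf("may pin 16 pages: %d\n", may_pin(16, 0));
        return 0;
    }
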
1201 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_process_iotlb_update()
1203 if (msg->iova < v->range.first || !msg->size || in vhost_vdpa_process_iotlb_update()
1204 msg->iova > U64_MAX - msg->size + 1 || in vhost_vdpa_process_iotlb_update()
1205 msg->iova + msg->size - 1 > v->range.last) in vhost_vdpa_process_iotlb_update()
1206 return -EINVAL; in vhost_vdpa_process_iotlb_update()
1208 if (vhost_iotlb_itree_first(iotlb, msg->iova, in vhost_vdpa_process_iotlb_update()
1209 msg->iova + msg->size - 1)) in vhost_vdpa_process_iotlb_update()
1210 return -EEXIST; in vhost_vdpa_process_iotlb_update()
1212 if (vdpa->use_va) in vhost_vdpa_process_iotlb_update()
1213 return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size, in vhost_vdpa_process_iotlb_update()
1214 msg->uaddr, msg->perm); in vhost_vdpa_process_iotlb_update()
1216 return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr, in vhost_vdpa_process_iotlb_update()
1217 msg->perm); in vhost_vdpa_process_iotlb_update()
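
vhost_vdpa_process_iotlb_update() validates the span [iova, iova + size - 1] before mapping, and the iova > U64_MAX - size + 1 test catches a 64-bit wrap-around without ever computing the wrapping sum. A standalone version of the same checks (the device range values are made up for the demo):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same validation order as the driver: non-zero size, no u64 wrap, within range. */
    static bool iova_range_ok(uint64_t iova, uint64_t size,
                              uint64_t first, uint64_t last)
    {
        if (iova < first || !size)
            return false;
        if (iova > UINT64_MAX - size + 1)    /* iova + size - 1 would wrap */
            return false;
        return iova + size - 1 <= last;
    }

    int main(void)
    {
        /* Hypothetical device range covering the whole 64-bit space. */
        printf("%d\n", iova_range_ok(0x1000, 0x1000, 0, UINT64_MAX));  /* 1 */
        printf("%d\n", iova_range_ok(UINT64_MAX, 2, 0, UINT64_MAX));   /* 0: wraps */
        printf("%d\n", iova_range_ok(0x1000, 0, 0, UINT64_MAX));       /* 0: empty */
        return 0;
    }
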
1220 static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid, in vhost_vdpa_process_iotlb_msg() argument
1224 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_process_iotlb_msg()
1225 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_process_iotlb_msg()
1230 mutex_lock(&dev->mutex); in vhost_vdpa_process_iotlb_msg()
1236 if (msg->type == VHOST_IOTLB_UPDATE || in vhost_vdpa_process_iotlb_msg()
1237 msg->type == VHOST_IOTLB_BATCH_BEGIN) { in vhost_vdpa_process_iotlb_msg()
1238 as = vhost_vdpa_find_alloc_as(v, asid); in vhost_vdpa_process_iotlb_msg()
1240 dev_err(&v->dev, "can't find and alloc asid %d\n", in vhost_vdpa_process_iotlb_msg()
1241 asid); in vhost_vdpa_process_iotlb_msg()
1242 r = -EINVAL; in vhost_vdpa_process_iotlb_msg()
1245 iotlb = &as->iotlb; in vhost_vdpa_process_iotlb_msg()
1247 iotlb = asid_to_iotlb(v, asid); in vhost_vdpa_process_iotlb_msg()
1249 if ((v->in_batch && v->batch_asid != asid) || !iotlb) { in vhost_vdpa_process_iotlb_msg()
1250 if (v->in_batch && v->batch_asid != asid) { in vhost_vdpa_process_iotlb_msg()
1251 dev_info(&v->dev, "batch id %d asid %d\n", in vhost_vdpa_process_iotlb_msg()
1252 v->batch_asid, asid); in vhost_vdpa_process_iotlb_msg()
1255 dev_err(&v->dev, "no iotlb for asid %d\n", asid); in vhost_vdpa_process_iotlb_msg()
1256 r = -EINVAL; in vhost_vdpa_process_iotlb_msg()
1260 switch (msg->type) { in vhost_vdpa_process_iotlb_msg()
1265 vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size); in vhost_vdpa_process_iotlb_msg()
1268 v->batch_asid = asid; in vhost_vdpa_process_iotlb_msg()
1269 v->in_batch = true; in vhost_vdpa_process_iotlb_msg()
1272 if (v->in_batch && ops->set_map) in vhost_vdpa_process_iotlb_msg()
1273 ops->set_map(vdpa, asid, iotlb); in vhost_vdpa_process_iotlb_msg()
1274 v->in_batch = false; in vhost_vdpa_process_iotlb_msg()
1277 r = -EINVAL; in vhost_vdpa_process_iotlb_msg()
1281 mutex_unlock(&dev->mutex); in vhost_vdpa_process_iotlb_msg()
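
The IOTLB message handler above defers the expensive ops->set_map() flush while a batch is open: VHOST_IOTLB_BATCH_BEGIN records the batch's ASID, updates accumulate, and BATCH_END pushes the whole table once; a message for a different ASID mid-batch is rejected. A reduced sketch of that state machine with the flush stubbed out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum msg_type { BATCH_BEGIN, UPDATE, BATCH_END };   /* mirrors VHOST_IOTLB_* */

    static bool in_batch;
    static uint32_t batch_asid;

    static void flush_map(uint32_t asid)    /* stand-in for ops->set_map() */
    {
        printf("set_map(asid=%u)\n", asid);
    }

    static int handle_msg(uint32_t asid, enum msg_type type)
    {
        /* A batch pins one ASID; messages for another ASID are rejected. */
        if (in_batch && batch_asid != asid)
            return -1;

        switch (type) {
        case BATCH_BEGIN:
            batch_asid = asid;
            in_batch = true;
            break;
        case UPDATE:
            if (!in_batch)
                flush_map(asid);       /* unbatched updates flush immediately */
            break;
        case BATCH_END:
            if (in_batch)
                flush_map(asid);       /* one flush for the whole batch */
            in_batch = false;
            break;
        }
        return 0;
    }

    int main(void)
    {
        handle_msg(1, BATCH_BEGIN);
        handle_msg(1, UPDATE);
        handle_msg(1, UPDATE);
        handle_msg(1, BATCH_END);          /* single set_map() happens here */
        return 0;
    }
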
1289 struct file *file = iocb->ki_filp; in vhost_vdpa_chr_write_iter()
1290 struct vhost_vdpa *v = file->private_data; in vhost_vdpa_chr_write_iter()
1291 struct vhost_dev *dev = &v->vdev; in vhost_vdpa_chr_write_iter()
1298 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_alloc_domain()
1299 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_alloc_domain()
1305 if (ops->set_map || ops->dma_map) in vhost_vdpa_alloc_domain()
1308 bus = dma_dev->bus; in vhost_vdpa_alloc_domain()
1310 return -EFAULT; in vhost_vdpa_alloc_domain()
1313 dev_warn_once(&v->dev, in vhost_vdpa_alloc_domain()
1315 return -ENOTSUPP; in vhost_vdpa_alloc_domain()
1318 v->domain = iommu_domain_alloc(bus); in vhost_vdpa_alloc_domain()
1319 if (!v->domain) in vhost_vdpa_alloc_domain()
1320 return -EIO; in vhost_vdpa_alloc_domain()
1322 ret = iommu_attach_device(v->domain, dma_dev); in vhost_vdpa_alloc_domain()
1329 iommu_domain_free(v->domain); in vhost_vdpa_alloc_domain()
1330 v->domain = NULL; in vhost_vdpa_alloc_domain()
1336 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_free_domain()
1339 if (v->domain) { in vhost_vdpa_free_domain()
1340 iommu_detach_device(v->domain, dma_dev); in vhost_vdpa_free_domain()
1341 iommu_domain_free(v->domain); in vhost_vdpa_free_domain()
1344 v->domain = NULL; in vhost_vdpa_free_domain()
1349 struct vdpa_iova_range *range = &v->range; in vhost_vdpa_set_iova_range()
1350 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_set_iova_range()
1351 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_set_iova_range()
1353 if (ops->get_iova_range) { in vhost_vdpa_set_iova_range()
1354 *range = ops->get_iova_range(vdpa); in vhost_vdpa_set_iova_range()
1355 } else if (v->domain && v->domain->geometry.force_aperture) { in vhost_vdpa_set_iova_range()
1356 range->first = v->domain->geometry.aperture_start; in vhost_vdpa_set_iova_range()
1357 range->last = v->domain->geometry.aperture_end; in vhost_vdpa_set_iova_range()
1359 range->first = 0; in vhost_vdpa_set_iova_range()
1360 range->last = ULLONG_MAX; in vhost_vdpa_set_iova_range()
1367 u32 asid; in vhost_vdpa_cleanup() local
1369 for (asid = 0; asid < v->vdpa->nas; asid++) { in vhost_vdpa_cleanup()
1370 as = asid_to_as(v, asid); in vhost_vdpa_cleanup()
1372 vhost_vdpa_remove_as(v, asid); in vhost_vdpa_cleanup()
1376 vhost_dev_cleanup(&v->vdev); in vhost_vdpa_cleanup()
1377 kfree(v->vdev.vqs); in vhost_vdpa_cleanup()
1378 v->vdev.vqs = NULL; in vhost_vdpa_cleanup()
1389 v = container_of(inode->i_cdev, struct vhost_vdpa, cdev); in vhost_vdpa_open()
1391 opened = atomic_cmpxchg(&v->opened, 0, 1); in vhost_vdpa_open()
1393 return -EBUSY; in vhost_vdpa_open()
1395 nvqs = v->nvqs; in vhost_vdpa_open()
1402 r = -ENOMEM; in vhost_vdpa_open()
1406 dev = &v->vdev; in vhost_vdpa_open()
1408 vqs[i] = &v->vqs[i]; in vhost_vdpa_open()
1409 vqs[i]->handle_kick = handle_vq_kick; in vhost_vdpa_open()
1420 filep->private_data = v; in vhost_vdpa_open()
1427 atomic_dec(&v->opened); in vhost_vdpa_open()
1435 for (i = 0; i < v->nvqs; i++) in vhost_vdpa_clean_irq()
1441 struct vhost_vdpa *v = filep->private_data; in vhost_vdpa_release()
1442 struct vhost_dev *d = &v->vdev; in vhost_vdpa_release()
1444 mutex_lock(&d->mutex); in vhost_vdpa_release()
1445 filep->private_data = NULL; in vhost_vdpa_release()
1448 vhost_dev_stop(&v->vdev); in vhost_vdpa_release()
1452 mutex_unlock(&d->mutex); in vhost_vdpa_release()
1454 atomic_dec(&v->opened); in vhost_vdpa_release()
1455 complete(&v->completion); in vhost_vdpa_release()
1463 struct vhost_vdpa *v = vmf->vma->vm_file->private_data; in vhost_vdpa_fault()
1464 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_fault()
1465 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_fault()
1467 struct vm_area_struct *vma = vmf->vma; in vhost_vdpa_fault()
1468 u16 index = vma->vm_pgoff; in vhost_vdpa_fault()
1470 notify = ops->get_vq_notification(vdpa, index); in vhost_vdpa_fault()
1472 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in vhost_vdpa_fault()
1473 if (remap_pfn_range(vma, vmf->address & PAGE_MASK, in vhost_vdpa_fault()
1475 vma->vm_page_prot)) in vhost_vdpa_fault()
1487 struct vhost_vdpa *v = vma->vm_file->private_data; in vhost_vdpa_mmap()
1488 struct vdpa_device *vdpa = v->vdpa; in vhost_vdpa_mmap()
1489 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_mmap()
1491 unsigned long index = vma->vm_pgoff; in vhost_vdpa_mmap()
1493 if (vma->vm_end - vma->vm_start != PAGE_SIZE) in vhost_vdpa_mmap()
1494 return -EINVAL; in vhost_vdpa_mmap()
1495 if ((vma->vm_flags & VM_SHARED) == 0) in vhost_vdpa_mmap()
1496 return -EINVAL; in vhost_vdpa_mmap()
1497 if (vma->vm_flags & VM_READ) in vhost_vdpa_mmap()
1498 return -EINVAL; in vhost_vdpa_mmap()
1500 return -EINVAL; in vhost_vdpa_mmap()
1501 if (!ops->get_vq_notification) in vhost_vdpa_mmap()
1502 return -ENOTSUPP; in vhost_vdpa_mmap()
1508 notify = ops->get_vq_notification(vdpa, index); in vhost_vdpa_mmap()
1509 if (notify.addr & (PAGE_SIZE - 1)) in vhost_vdpa_mmap()
1510 return -EINVAL; in vhost_vdpa_mmap()
1511 if (vma->vm_end - vma->vm_start != notify.size) in vhost_vdpa_mmap()
1512 return -ENOTSUPP; in vhost_vdpa_mmap()
1515 vma->vm_ops = &vhost_vdpa_vm_ops; in vhost_vdpa_mmap()
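
From userspace, the checks in vhost_vdpa_mmap() above dictate how a virtqueue doorbell page is mapped: exactly one page, MAP_SHARED, writable but not readable, with the queue index carried in the mmap offset (it lands in vma->vm_pgoff). A hedged sketch of that call; the /dev/vhost-vdpa-0 node name follows the "vhost-vdpa-%u" dev_set_name() seen near the end of this listing, but the actual path depends on the system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        unsigned long qindex = 0;       /* queue whose doorbell we want */
        int fd = open("/dev/vhost-vdpa-0", O_RDWR); /* path is system-dependent */
        void *db;

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Must be exactly one page, MAP_SHARED, and not PROT_READ --
         * the checks in vhost_vdpa_mmap() reject anything else. */
        db = mmap(NULL, psz, PROT_WRITE, MAP_SHARED, fd, qindex * psz);
        if (db == MAP_FAILED)
            perror("mmap");
        else
            munmap(db, psz);
        close(fd);
        return 0;
    }
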
1537 ida_simple_remove(&vhost_vdpa_ida, v->minor); in vhost_vdpa_release_dev()
1538 kfree(v->vqs); in vhost_vdpa_release_dev()
1544 const struct vdpa_config_ops *ops = vdpa->config; in vhost_vdpa_probe()
1552 if (!ops->set_map && !ops->dma_map && in vhost_vdpa_probe()
1553 (vdpa->ngroups > 1 || vdpa->nas > 1)) in vhost_vdpa_probe()
1554 return -EOPNOTSUPP; in vhost_vdpa_probe()
1558 return -ENOMEM; in vhost_vdpa_probe()
1567 atomic_set(&v->opened, 0); in vhost_vdpa_probe()
1568 v->minor = minor; in vhost_vdpa_probe()
1569 v->vdpa = vdpa; in vhost_vdpa_probe()
1570 v->nvqs = vdpa->nvqs; in vhost_vdpa_probe()
1571 v->virtio_id = ops->get_device_id(vdpa); in vhost_vdpa_probe()
1573 device_initialize(&v->dev); in vhost_vdpa_probe()
1574 v->dev.release = vhost_vdpa_release_dev; in vhost_vdpa_probe()
1575 v->dev.parent = &vdpa->dev; in vhost_vdpa_probe()
1576 v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor); in vhost_vdpa_probe()
1577 v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue), in vhost_vdpa_probe()
1579 if (!v->vqs) { in vhost_vdpa_probe()
1580 r = -ENOMEM; in vhost_vdpa_probe()
1584 r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor); in vhost_vdpa_probe()
1588 cdev_init(&v->cdev, &vhost_vdpa_fops); in vhost_vdpa_probe()
1589 v->cdev.owner = THIS_MODULE; in vhost_vdpa_probe()
1591 r = cdev_device_add(&v->cdev, &v->dev); in vhost_vdpa_probe()
1595 init_completion(&v->completion); in vhost_vdpa_probe()
1599 INIT_HLIST_HEAD(&v->as[i]); in vhost_vdpa_probe()
1604 put_device(&v->dev); in vhost_vdpa_probe()
1613 cdev_device_del(&v->cdev, &v->dev); in vhost_vdpa_remove()
1616 opened = atomic_cmpxchg(&v->opened, 0, 1); in vhost_vdpa_remove()
1619 wait_for_completion(&v->completion); in vhost_vdpa_remove()
1622 put_device(&v->dev); in vhost_vdpa_remove()
1638 "vhost-vdpa"); in vhost_vdpa_init()
1665 MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");