Lines Matching +full:ctx +full:asid (drivers/vhost/vhost.c)
1 // SPDX-License-Identifier: GPL-2.0-only
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
55 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
60 vq->user_be = true; in vhost_enable_cross_endian_big()
65 vq->user_be = false; in vhost_enable_cross_endian_little()
72 if (vq->private_data) in vhost_set_vring_endian()
73 return -EBUSY; in vhost_set_vring_endian()
76 return -EFAULT; in vhost_set_vring_endian()
80 return -EINVAL; in vhost_set_vring_endian()
95 .num = vq->user_be in vhost_get_vring_endian()
99 return -EFAULT; in vhost_get_vring_endian()
111 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
120 return -ENOIOCTLCMD; in vhost_set_vring_endian()
126 return -ENOIOCTLCMD; in vhost_get_vring_endian()
131 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) in vhost_init_is_le()
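
The two variants of vhost_init_is_le() above (lines 111 and 131) only record the byte-order decision in vq->is_le; the swapping itself happens in conversion helpers. A minimal sketch of the idea, assuming the usual kernel byte-order primitives (the real helpers live in drivers/vhost/vhost.h):

/* Sketch, not the driver's exact helper: convert a guest 16-bit
 * value to CPU order based on the per-virtqueue endianness. */
static inline u16 sketch_vhost16_to_cpu(const struct vhost_virtqueue *vq,
					__virtio16 val)
{
	if (vq->is_le)	/* VIRTIO 1.0, or legacy on a little-endian guest */
		return le16_to_cpu((__force __le16)val);
	return be16_to_cpu((__force __be16)val);	/* legacy big-endian */
}
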
151 complete(&s->wait_event); in vhost_flush_work()
160 poll->wqh = wqh; in vhost_poll_func()
161 add_wait_queue(wqh, &poll->wait); in vhost_poll_func()
168 struct vhost_work *work = &poll->work; in vhost_poll_wakeup()
170 if (!(key_to_poll(key) & poll->mask)) in vhost_poll_wakeup()
173 if (!poll->dev->use_worker) in vhost_poll_wakeup()
174 work->fn(work); in vhost_poll_wakeup()
183 clear_bit(VHOST_WORK_QUEUED, &work->flags); in vhost_work_init()
184 work->fn = fn; in vhost_work_init()
193 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); in vhost_poll_init()
194 init_poll_funcptr(&poll->table, vhost_poll_func); in vhost_poll_init()
195 poll->mask = mask; in vhost_poll_init()
196 poll->dev = dev; in vhost_poll_init()
197 poll->wqh = NULL; in vhost_poll_init()
198 poll->vq = vq; in vhost_poll_init()
200 vhost_work_init(&poll->work, fn); in vhost_poll_init()
210 if (poll->wqh) in vhost_poll_start()
213 mask = vfs_poll(file, &poll->table); in vhost_poll_start()
215 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); in vhost_poll_start()
218 return -EINVAL; in vhost_poll_start()
229 if (poll->wqh) { in vhost_poll_stop()
230 remove_wait_queue(poll->wqh, &poll->wait); in vhost_poll_stop()
231 poll->wqh = NULL; in vhost_poll_stop()
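
vhost_poll_init()/vhost_poll_start()/vhost_poll_stop() (lines 193-231) form a small lifecycle around a file's wait queue. A hedged usage sketch; handle_kick_fn and kick_file are placeholders, not names from this file:

/* Hypothetical backend setup: watch a kick eventfd for EPOLLIN and
 * run handle_kick_fn from the vhost worker when it fires. */
vhost_poll_init(&vq->poll, handle_kick_fn, EPOLLIN, dev, vq);
r = vhost_poll_start(&vq->poll, kick_file); /* hooks poll->wait onto the file's wqh */
/* ... */
vhost_poll_stop(&vq->poll);	/* safe to call again: wqh is NULLed */
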
239 if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { in vhost_worker_queue()
244 llist_add(&work->node, &worker->work_list); in vhost_worker_queue()
245 vhost_task_wake(worker->vtsk); in vhost_worker_queue()
255 worker = rcu_dereference(vq->worker); in vhost_vq_work_queue()
279 * vhost_worker_flush - flush a worker
301 xa_for_each(&dev->worker_xa, i, worker) { in vhost_dev_flush()
302 mutex_lock(&worker->mutex); in vhost_dev_flush()
303 if (!worker->attachment_cnt) { in vhost_dev_flush()
304 mutex_unlock(&worker->mutex); in vhost_dev_flush()
308 mutex_unlock(&worker->mutex); in vhost_dev_flush()
320 worker = rcu_dereference(vq->worker); in vhost_vq_has_work()
321 if (worker && !llist_empty(&worker->work_list)) in vhost_vq_has_work()
331 vhost_vq_work_queue(poll->vq, &poll->work); in vhost_poll_queue()
340 vq->meta_iotlb[j] = NULL; in __vhost_vq_meta_reset()
347 for (i = 0; i < d->nvqs; ++i) in vhost_vq_meta_reset()
348 __vhost_vq_meta_reset(d->vqs[i]); in vhost_vq_meta_reset()
353 call_ctx->ctx = NULL; in vhost_vring_call_reset()
354 memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer)); in vhost_vring_call_reset()
359 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq); in vhost_vq_is_setup()
366 vq->num = 1; in vhost_vq_reset()
367 vq->desc = NULL; in vhost_vq_reset()
368 vq->avail = NULL; in vhost_vq_reset()
369 vq->used = NULL; in vhost_vq_reset()
370 vq->last_avail_idx = 0; in vhost_vq_reset()
371 vq->avail_idx = 0; in vhost_vq_reset()
372 vq->last_used_idx = 0; in vhost_vq_reset()
373 vq->signalled_used = 0; in vhost_vq_reset()
374 vq->signalled_used_valid = false; in vhost_vq_reset()
375 vq->used_flags = 0; in vhost_vq_reset()
376 vq->log_used = false; in vhost_vq_reset()
377 vq->log_addr = -1ull; in vhost_vq_reset()
378 vq->private_data = NULL; in vhost_vq_reset()
379 vq->acked_features = 0; in vhost_vq_reset()
380 vq->acked_backend_features = 0; in vhost_vq_reset()
381 vq->log_base = NULL; in vhost_vq_reset()
382 vq->error_ctx = NULL; in vhost_vq_reset()
383 vq->kick = NULL; in vhost_vq_reset()
384 vq->log_ctx = NULL; in vhost_vq_reset()
387 vq->busyloop_timeout = 0; in vhost_vq_reset()
388 vq->umem = NULL; in vhost_vq_reset()
389 vq->iotlb = NULL; in vhost_vq_reset()
390 rcu_assign_pointer(vq->worker, NULL); in vhost_vq_reset()
391 vhost_vring_call_reset(&vq->call_ctx); in vhost_vq_reset()
401 node = llist_del_all(&worker->work_list); in vhost_worker()
409 clear_bit(VHOST_WORK_QUEUED, &work->flags); in vhost_worker()
410 kcov_remote_start_common(worker->kcov_handle); in vhost_worker()
411 work->fn(work); in vhost_worker()
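
vhost_worker() (around lines 401-411) drains the whole work list with a single llist_del_all() and then runs each entry. A stripped-down sketch of that consumer loop, assuming the same field names as above:

/* Consumer side: detach the entire lock-free list at once, then run
 * the works; clearing VHOST_WORK_QUEUED first allows safe requeueing. */
node = llist_del_all(&worker->work_list);
llist_for_each_entry_safe(work, work_next, node, node) {
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	kcov_remote_start_common(worker->kcov_handle);
	work->fn(work);
	kcov_remote_stop();
	cond_resched();		/* keep the vhost task preemptible */
}
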
422 kfree(vq->indirect); in vhost_vq_free_iovecs()
423 vq->indirect = NULL; in vhost_vq_free_iovecs()
424 kfree(vq->log); in vhost_vq_free_iovecs()
425 vq->log = NULL; in vhost_vq_free_iovecs()
426 kfree(vq->heads); in vhost_vq_free_iovecs()
427 vq->heads = NULL; in vhost_vq_free_iovecs()
436 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_alloc_iovecs()
437 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
438 vq->indirect = kmalloc_array(UIO_MAXIOV, in vhost_dev_alloc_iovecs()
439 sizeof(*vq->indirect), in vhost_dev_alloc_iovecs()
441 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), in vhost_dev_alloc_iovecs()
443 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), in vhost_dev_alloc_iovecs()
445 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
451 for (; i >= 0; --i) in vhost_dev_alloc_iovecs()
452 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_alloc_iovecs()
453 return -ENOMEM; in vhost_dev_alloc_iovecs()
460 for (i = 0; i < dev->nvqs; ++i) in vhost_dev_free_iovecs()
461 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_free_iovecs()
467 struct vhost_dev *dev = vq->dev; in vhost_exceeds_weight()
469 if ((dev->byte_weight && total_len >= dev->byte_weight) || in vhost_exceeds_weight()
470 pkts >= dev->weight) { in vhost_exceeds_weight()
471 vhost_poll_queue(&vq->poll); in vhost_exceeds_weight()
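
vhost_exceeds_weight() (lines 467-471) is the fairness valve: once a handler has processed dev->weight buffers or dev->byte_weight bytes in one run, it requeues itself via vhost_poll_queue() and yields the worker. A hedged sketch of the loop shape a backend uses; process_one_buf() is a placeholder:

/* Typical kick-handler loop bounded by the device weights (sketch). */
do {
	len = process_one_buf(vq);	/* hypothetical per-buffer work */
	if (len < 0)
		break;
	total_len += len;
} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
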
485 return size_add(struct_size(vq->avail, ring, num), event); in vhost_get_avail_size()
494 return size_add(struct_size(vq->used, ring, num), event); in vhost_get_used_size()
500 return sizeof(*vq->desc) * num; in vhost_get_desc_size()
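
The three size helpers (lines 485-500) encode the split-ring layout; the 2-byte `event` term exists only when VIRTIO_RING_F_EVENT_IDX is negotiated and is exactly the trailing slot the vhost_used_event()/vhost_avail_event() macros at lines 49-50 address. A worked instance, in comment form, for num = 256 with event-idx on:

/* Split-ring sizes for num = 256, EVENT_IDX negotiated (worked example):
 *   desc  area: 16 * 256        = 4096 bytes
 *   avail area: 4 + 2 * 256 + 2 =  518 bytes (flags, idx, ring[], used_event)
 *   used  area: 4 + 8 * 256 + 2 = 2054 bytes (flags, idx, ring[], avail_event)
 */
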
507 int (*msg_handler)(struct vhost_dev *dev, u32 asid, in vhost_dev_init() argument
513 dev->vqs = vqs; in vhost_dev_init()
514 dev->nvqs = nvqs; in vhost_dev_init()
515 mutex_init(&dev->mutex); in vhost_dev_init()
516 dev->log_ctx = NULL; in vhost_dev_init()
517 dev->umem = NULL; in vhost_dev_init()
518 dev->iotlb = NULL; in vhost_dev_init()
519 dev->mm = NULL; in vhost_dev_init()
520 dev->iov_limit = iov_limit; in vhost_dev_init()
521 dev->weight = weight; in vhost_dev_init()
522 dev->byte_weight = byte_weight; in vhost_dev_init()
523 dev->use_worker = use_worker; in vhost_dev_init()
524 dev->msg_handler = msg_handler; in vhost_dev_init()
525 init_waitqueue_head(&dev->wait); in vhost_dev_init()
526 INIT_LIST_HEAD(&dev->read_list); in vhost_dev_init()
527 INIT_LIST_HEAD(&dev->pending_list); in vhost_dev_init()
528 spin_lock_init(&dev->iotlb_lock); in vhost_dev_init()
529 xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC); in vhost_dev_init()
531 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_init()
532 vq = dev->vqs[i]; in vhost_dev_init()
533 vq->log = NULL; in vhost_dev_init()
534 vq->indirect = NULL; in vhost_dev_init()
535 vq->heads = NULL; in vhost_dev_init()
536 vq->dev = dev; in vhost_dev_init()
537 mutex_init(&vq->mutex); in vhost_dev_init()
539 if (vq->handle_kick) in vhost_dev_init()
540 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
550 return dev->mm == current->mm ? 0 : -EPERM; in vhost_dev_check_owner()
557 return dev->mm; in vhost_dev_has_owner()
564 if (dev->use_worker) { in vhost_attach_mm()
565 dev->mm = get_task_mm(current); in vhost_attach_mm()
573 dev->mm = current->mm; in vhost_attach_mm()
574 mmgrab(dev->mm); in vhost_attach_mm()
580 if (!dev->mm) in vhost_detach_mm()
583 if (dev->use_worker) in vhost_detach_mm()
584 mmput(dev->mm); in vhost_detach_mm()
586 mmdrop(dev->mm); in vhost_detach_mm()
588 dev->mm = NULL; in vhost_detach_mm()
597 WARN_ON(!llist_empty(&worker->work_list)); in vhost_worker_destroy()
598 xa_erase(&dev->worker_xa, worker->id); in vhost_worker_destroy()
599 vhost_task_stop(worker->vtsk); in vhost_worker_destroy()
608 if (!dev->use_worker) in vhost_workers_free()
611 for (i = 0; i < dev->nvqs; i++) in vhost_workers_free()
612 rcu_assign_pointer(dev->vqs[i]->worker, NULL); in vhost_workers_free()
617 xa_for_each(&dev->worker_xa, i, worker) in vhost_workers_free()
619 xa_destroy(&dev->worker_xa); in vhost_workers_free()
634 snprintf(name, sizeof(name), "vhost-%d", current->pid); in vhost_worker_create()
640 mutex_init(&worker->mutex); in vhost_worker_create()
641 init_llist_head(&worker->work_list); in vhost_worker_create()
642 worker->kcov_handle = kcov_common_handle(); in vhost_worker_create()
643 worker->vtsk = vtsk; in vhost_worker_create()
647 ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); in vhost_worker_create()
650 worker->id = id; in vhost_worker_create()
667 old_worker = rcu_dereference_check(vq->worker, in __vhost_vq_attach_worker()
668 lockdep_is_held(&vq->dev->mutex)); in __vhost_vq_attach_worker()
670 mutex_lock(&worker->mutex); in __vhost_vq_attach_worker()
671 worker->attachment_cnt++; in __vhost_vq_attach_worker()
672 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
673 rcu_assign_pointer(vq->worker, worker); in __vhost_vq_attach_worker()
681 mutex_lock(&old_worker->mutex); in __vhost_vq_attach_worker()
682 old_worker->attachment_cnt--; in __vhost_vq_attach_worker()
690 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
691 if (!vhost_vq_get_backend(vq) && !vq->kick) { in __vhost_vq_attach_worker()
692 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
693 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
699 WARN_ON(!old_worker->attachment_cnt && in __vhost_vq_attach_worker()
700 !llist_empty(&old_worker->work_list)); in __vhost_vq_attach_worker()
703 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
709 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
716 unsigned long index = info->worker_id; in vhost_vq_attach_worker()
717 struct vhost_dev *dev = vq->dev; in vhost_vq_attach_worker()
720 if (!dev->use_worker) in vhost_vq_attach_worker()
721 return -EINVAL; in vhost_vq_attach_worker()
723 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_vq_attach_worker()
724 if (!worker || worker->id != info->worker_id) in vhost_vq_attach_worker()
725 return -ENODEV; in vhost_vq_attach_worker()
739 return -ENOMEM; in vhost_new_worker()
741 info->worker_id = worker->id; in vhost_new_worker()
749 unsigned long index = info->worker_id; in vhost_free_worker()
752 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_free_worker()
753 if (!worker || worker->id != info->worker_id) in vhost_free_worker()
754 return -ENODEV; in vhost_free_worker()
756 mutex_lock(&worker->mutex); in vhost_free_worker()
757 if (worker->attachment_cnt) { in vhost_free_worker()
758 mutex_unlock(&worker->mutex); in vhost_free_worker()
759 return -EBUSY; in vhost_free_worker()
761 mutex_unlock(&worker->mutex); in vhost_free_worker()
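
vhost_new_worker(), vhost_free_worker() and vhost_vq_attach_worker() back the VHOST_NEW_WORKER, VHOST_FREE_WORKER and VHOST_ATTACH_VRING_WORKER ioctls handled in vhost_worker_ioctl() below. A hedged userspace sketch, assuming the uapi structs from <linux/vhost.h> and an already-open vhost device fd; error handling is minimal:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Create a second worker and pin virtqueue 1 to it (sketch). */
static int pin_vring_to_new_worker(int vhost_fd)
{
	struct vhost_worker_state w = {};
	struct vhost_vring_worker vw = { .index = 1 };

	if (ioctl(vhost_fd, VHOST_NEW_WORKER, &w) < 0)
		return -1;		/* kernel fills in w.worker_id */
	vw.worker_id = w.worker_id;
	/* Note: a worker with attachment_cnt != 0 cannot be freed (-EBUSY). */
	return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vw);
}
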
778 if (idx >= dev->nvqs) in vhost_get_vq_from_user()
779 return -ENOBUFS; in vhost_get_vq_from_user()
781 idx = array_index_nospec(idx, dev->nvqs); in vhost_get_vq_from_user()
783 *vq = dev->vqs[idx]; in vhost_get_vq_from_user()
799 if (!dev->use_worker) in vhost_worker_ioctl()
800 return -EINVAL; in vhost_worker_ioctl()
803 return -EINVAL; in vhost_worker_ioctl()
814 ret = -EFAULT; in vhost_worker_ioctl()
818 return -EFAULT; in vhost_worker_ioctl()
825 return -ENOIOCTLCMD; in vhost_worker_ioctl()
835 ret = -EFAULT; in vhost_worker_ioctl()
842 worker = rcu_dereference_check(vq->worker, in vhost_worker_ioctl()
843 lockdep_is_held(&dev->mutex)); in vhost_worker_ioctl()
845 ret = -EINVAL; in vhost_worker_ioctl()
850 ring_worker.worker_id = worker->id; in vhost_worker_ioctl()
853 ret = -EFAULT; in vhost_worker_ioctl()
856 ret = -ENOIOCTLCMD; in vhost_worker_ioctl()
872 err = -EBUSY; in vhost_dev_set_owner()
882 if (dev->use_worker) { in vhost_dev_set_owner()
891 err = -ENOMEM; in vhost_dev_set_owner()
895 for (i = 0; i < dev->nvqs; i++) in vhost_dev_set_owner()
896 __vhost_vq_attach_worker(dev->vqs[i], worker); in vhost_dev_set_owner()
929 dev->umem = umem; in vhost_dev_reset_owner()
933 for (i = 0; i < dev->nvqs; ++i) in vhost_dev_reset_owner()
934 dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
942 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_stop()
943 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) in vhost_dev_stop()
944 vhost_poll_stop(&dev->vqs[i]->poll); in vhost_dev_stop()
955 spin_lock(&dev->iotlb_lock); in vhost_clear_msg()
957 list_for_each_entry_safe(node, n, &dev->read_list, node) { in vhost_clear_msg()
958 list_del(&node->node); in vhost_clear_msg()
962 list_for_each_entry_safe(node, n, &dev->pending_list, node) { in vhost_clear_msg()
963 list_del(&node->node); in vhost_clear_msg()
967 spin_unlock(&dev->iotlb_lock); in vhost_clear_msg()
975 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_cleanup()
976 if (dev->vqs[i]->error_ctx) in vhost_dev_cleanup()
977 eventfd_ctx_put(dev->vqs[i]->error_ctx); in vhost_dev_cleanup()
978 if (dev->vqs[i]->kick) in vhost_dev_cleanup()
979 fput(dev->vqs[i]->kick); in vhost_dev_cleanup()
980 if (dev->vqs[i]->call_ctx.ctx) in vhost_dev_cleanup()
981 eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx); in vhost_dev_cleanup()
982 vhost_vq_reset(dev, dev->vqs[i]); in vhost_dev_cleanup()
985 if (dev->log_ctx) in vhost_dev_cleanup()
986 eventfd_ctx_put(dev->log_ctx); in vhost_dev_cleanup()
987 dev->log_ctx = NULL; in vhost_dev_cleanup()
989 vhost_iotlb_free(dev->umem); in vhost_dev_cleanup()
990 dev->umem = NULL; in vhost_dev_cleanup()
991 vhost_iotlb_free(dev->iotlb); in vhost_dev_cleanup()
992 dev->iotlb = NULL; in vhost_dev_cleanup()
994 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); in vhost_dev_cleanup()
1005 if (a > ULONG_MAX - (unsigned long)log_base || in log_access_ok()
1010 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); in log_access_ok()
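
The arithmetic at line 1010 sizes the dirty-log bitmap that log_access_ok() range-checks: one bit per VHOST_PAGE_SIZE page of guest memory, rounded up to whole bytes. A worked instance, assuming VHOST_PAGE_SIZE = 4096:

/* Worked example (comment only): sz = 1 GiB of logged guest memory.
 *   dirty bits  = 1 GiB / 4096                   = 262144
 *   bitmap size = (sz + 4096 * 8 - 1) / 4096 / 8 = 32768 bytes (32 KiB)
 * i.e. DIV_ROUND_UP(sz, VHOST_PAGE_SIZE * 8).
 */
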
1022 return uaddr > ULONG_MAX - size + 1; in vhost_overflow()
1034 list_for_each_entry(map, &umem->list, link) { in vq_memory_access_ok()
1035 unsigned long a = map->addr; in vq_memory_access_ok()
1037 if (vhost_overflow(map->addr, map->size)) in vq_memory_access_ok()
1041 if (!access_ok((void __user *)a, map->size)) in vq_memory_access_ok()
1044 map->start, in vq_memory_access_ok()
1045 map->size)) in vq_memory_access_ok()
1055 const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; in vhost_vq_meta_fetch()
1060 return (void __user *)(uintptr_t)(map->addr + addr - map->start); in vhost_vq_meta_fetch()
1070 for (i = 0; i < d->nvqs; ++i) { in memory_access_ok()
1074 mutex_lock(&d->vqs[i]->mutex); in memory_access_ok()
1075 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL); in memory_access_ok()
1077 if (d->vqs[i]->private_data) in memory_access_ok()
1078 ok = vq_memory_access_ok(d->vqs[i]->log_base, in memory_access_ok()
1082 mutex_unlock(&d->vqs[i]->mutex); in memory_access_ok()
1097 if (!vq->iotlb) in vhost_copy_to_user()
1102 * could be accessed through iotlb. So -EAGAIN should in vhost_copy_to_user()
1113 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, in vhost_copy_to_user()
1114 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_to_user()
1118 iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size); in vhost_copy_to_user()
1132 if (!vq->iotlb) in vhost_copy_from_user()
1137 * could be accessed through iotlb. So -EAGAIN should in vhost_copy_from_user()
1148 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, in vhost_copy_from_user()
1149 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_from_user()
1157 iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size); in vhost_copy_from_user()
1173 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, in __vhost_get_user_slow()
1174 ARRAY_SIZE(vq->iotlb_iov), in __vhost_get_user_slow()
1183 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { in __vhost_get_user_slow()
1190 return vq->iotlb_iov[0].iov_base; in __vhost_get_user_slow()
1195 * could be accessed through iotlb. So -EAGAIN should
1213 if (!vq->iotlb) { \
1222 ret = -EFAULT; \
1229 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), in vhost_put_avail_event()
1237 return vhost_copy_to_user(vq, vq->used->ring + idx, head, in vhost_put_used()
1244 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), in vhost_put_used_flags()
1245 &vq->used->flags); in vhost_put_used_flags()
1251 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), in vhost_put_used_idx()
1252 &vq->used->idx); in vhost_put_used_idx()
1258 if (!vq->iotlb) { \
1268 ret = -EFAULT; \
1282 for (i = 0; i < d->nvqs; ++i) in vhost_dev_lock_vqs()
1283 mutex_lock_nested(&d->vqs[i]->mutex, i); in vhost_dev_lock_vqs()
1289 for (i = 0; i < d->nvqs; ++i) in vhost_dev_unlock_vqs()
1290 mutex_unlock(&d->vqs[i]->mutex); in vhost_dev_unlock_vqs()
1296 return vhost_get_avail(vq, *idx, &vq->avail->idx); in vhost_get_avail_idx()
1303 &vq->avail->ring[idx & (vq->num - 1)]); in vhost_get_avail_head()
1309 return vhost_get_avail(vq, *flags, &vq->avail->flags); in vhost_get_avail_flags()
1321 return vhost_get_used(vq, *idx, &vq->used->idx); in vhost_get_used_idx()
1327 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); in vhost_get_desc()
1335 spin_lock(&d->iotlb_lock); in vhost_iotlb_notify_vq()
1337 list_for_each_entry_safe(node, n, &d->pending_list, node) { in vhost_iotlb_notify_vq()
1338 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; in vhost_iotlb_notify_vq()
1339 if (msg->iova <= vq_msg->iova && in vhost_iotlb_notify_vq()
1340 msg->iova + msg->size - 1 >= vq_msg->iova && in vhost_iotlb_notify_vq()
1341 vq_msg->type == VHOST_IOTLB_MISS) { in vhost_iotlb_notify_vq()
1342 vhost_poll_queue(&node->vq->poll); in vhost_iotlb_notify_vq()
1343 list_del(&node->node); in vhost_iotlb_notify_vq()
1348 spin_unlock(&d->iotlb_lock); in vhost_iotlb_notify_vq()
1368 static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid, in vhost_process_iotlb_msg() argument
1373 if (asid != 0) in vhost_process_iotlb_msg()
1374 return -EINVAL; in vhost_process_iotlb_msg()
1376 mutex_lock(&dev->mutex); in vhost_process_iotlb_msg()
1378 switch (msg->type) { in vhost_process_iotlb_msg()
1380 if (!dev->iotlb) { in vhost_process_iotlb_msg()
1381 ret = -EFAULT; in vhost_process_iotlb_msg()
1384 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) { in vhost_process_iotlb_msg()
1385 ret = -EFAULT; in vhost_process_iotlb_msg()
1389 if (vhost_iotlb_add_range(dev->iotlb, msg->iova, in vhost_process_iotlb_msg()
1390 msg->iova + msg->size - 1, in vhost_process_iotlb_msg()
1391 msg->uaddr, msg->perm)) { in vhost_process_iotlb_msg()
1392 ret = -ENOMEM; in vhost_process_iotlb_msg()
1398 if (!dev->iotlb) { in vhost_process_iotlb_msg()
1399 ret = -EFAULT; in vhost_process_iotlb_msg()
1403 vhost_iotlb_del_range(dev->iotlb, msg->iova, in vhost_process_iotlb_msg()
1404 msg->iova + msg->size - 1); in vhost_process_iotlb_msg()
1407 ret = -EINVAL; in vhost_process_iotlb_msg()
1412 mutex_unlock(&dev->mutex); in vhost_process_iotlb_msg()
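
vhost_process_iotlb_msg() is the kernel half of the IOTLB protocol: userspace writes VHOST_IOTLB_UPDATE/INVALIDATE messages into the vhost chardev (see vhost_chr_write_iter() below), and only asid 0 is accepted by this default handler. A hedged userspace sketch of one update, assuming the v2 framing from <linux/vhost_types.h> was negotiated via VHOST_BACKEND_F_IOTLB_MSG_V2:

#include <unistd.h>
#include <linux/vhost_types.h>

/* Map one IOVA range for the device (sketch; error handling elided). */
static ssize_t iotlb_map(int vhost_fd, __u64 iova, __u64 uaddr, __u64 size)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.asid = 0,		/* this handler rejects asid != 0 */
		.iotlb = {
			.iova  = iova,
			.size  = size,
			.uaddr = uaddr,
			.perm  = VHOST_ACCESS_RW,
			.type  = VHOST_IOTLB_UPDATE,
		},
	};
	return write(vhost_fd, &msg, sizeof(msg));
}
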
1422 u32 asid = 0; in vhost_chr_write_iter() local
1426 ret = -EINVAL; in vhost_chr_write_iter()
1435 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int); in vhost_chr_write_iter()
1438 if (vhost_backend_has_feature(dev->vqs[0], in vhost_chr_write_iter()
1440 ret = copy_from_iter(&asid, sizeof(asid), from); in vhost_chr_write_iter()
1441 if (ret != sizeof(asid)) { in vhost_chr_write_iter()
1442 ret = -EINVAL; in vhost_chr_write_iter()
1450 ret = -EINVAL; in vhost_chr_write_iter()
1457 ret = -EINVAL; in vhost_chr_write_iter()
1462 ret = -EINVAL; in vhost_chr_write_iter()
1466 if (dev->msg_handler) in vhost_chr_write_iter()
1467 ret = dev->msg_handler(dev, asid, &msg); in vhost_chr_write_iter()
1469 ret = vhost_process_iotlb_msg(dev, asid, &msg); in vhost_chr_write_iter()
1471 ret = -EFAULT; in vhost_chr_write_iter()
1487 poll_wait(file, &dev->wait, wait); in vhost_chr_poll()
1489 if (!list_empty(&dev->read_list)) in vhost_chr_poll()
1509 prepare_to_wait(&dev->wait, &wait, in vhost_chr_read_iter()
1512 node = vhost_dequeue_msg(dev, &dev->read_list); in vhost_chr_read_iter()
1516 ret = -EAGAIN; in vhost_chr_read_iter()
1520 ret = -ERESTARTSYS; in vhost_chr_read_iter()
1523 if (!dev->iotlb) { in vhost_chr_read_iter()
1524 ret = -EBADFD; in vhost_chr_read_iter()
1532 finish_wait(&dev->wait, &wait); in vhost_chr_read_iter()
1536 void *start = &node->msg; in vhost_chr_read_iter()
1538 switch (node->msg.type) { in vhost_chr_read_iter()
1540 size = sizeof(node->msg); in vhost_chr_read_iter()
1541 msg = &node->msg.iotlb; in vhost_chr_read_iter()
1544 size = sizeof(node->msg_v2); in vhost_chr_read_iter()
1545 msg = &node->msg_v2.iotlb; in vhost_chr_read_iter()
1553 if (ret != size || msg->type != VHOST_IOTLB_MISS) { in vhost_chr_read_iter()
1557 vhost_enqueue_msg(dev, &dev->pending_list, node); in vhost_chr_read_iter()
1566 struct vhost_dev *dev = vq->dev; in vhost_iotlb_miss()
1573 return -ENOMEM; in vhost_iotlb_miss()
1576 node->msg_v2.type = VHOST_IOTLB_MSG_V2; in vhost_iotlb_miss()
1577 msg = &node->msg_v2.iotlb; in vhost_iotlb_miss()
1579 msg = &node->msg.iotlb; in vhost_iotlb_miss()
1582 msg->type = VHOST_IOTLB_MISS; in vhost_iotlb_miss()
1583 msg->iova = iova; in vhost_iotlb_miss()
1584 msg->perm = access; in vhost_iotlb_miss()
1586 vhost_enqueue_msg(dev, &dev->read_list, node); in vhost_iotlb_miss()
1599 if (vq->iotlb) in vq_access_ok()
1614 if (likely(map->perm & access)) in vhost_vq_meta_update()
1615 vq->meta_iotlb[type] = map; in vhost_vq_meta_update()
1622 struct vhost_iotlb *umem = vq->iotlb; in iotlb_access_ok()
1623 u64 s = 0, size, orig_addr = addr, last = addr + len - 1; in iotlb_access_ok()
1630 if (map == NULL || map->start > addr) { in iotlb_access_ok()
1633 } else if (!(map->perm & access)) { in iotlb_access_ok()
1640 size = map->size - addr + map->start; in iotlb_access_ok()
1654 unsigned int num = vq->num; in vq_meta_prefetch()
1656 if (!vq->iotlb) in vq_meta_prefetch()
1659 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, in vq_meta_prefetch()
1661 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, in vq_meta_prefetch()
1664 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, in vq_meta_prefetch()
1673 return memory_access_ok(dev, dev->umem, 1); in vhost_log_access_ok()
1684 if (vq->iotlb) in vq_log_used_access_ok()
1688 vhost_get_used_size(vq, vq->num)); in vq_log_used_access_ok()
1696 return vq_memory_access_ok(log_base, vq->umem, in vq_log_access_ok()
1698 vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); in vq_log_access_ok()
1705 if (!vq_log_access_ok(vq, vq->log_base)) in vhost_vq_access_ok()
1708 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); in vhost_vq_access_ok()
1721 return -EFAULT; in vhost_set_memory()
1723 return -EOPNOTSUPP; in vhost_set_memory()
1725 return -E2BIG; in vhost_set_memory()
1729 return -ENOMEM; in vhost_set_memory()
1732 if (copy_from_user(newmem->regions, m->regions, in vhost_set_memory()
1735 return -EFAULT; in vhost_set_memory()
1741 return -ENOMEM; in vhost_set_memory()
1744 for (region = newmem->regions; in vhost_set_memory()
1745 region < newmem->regions + mem.nregions; in vhost_set_memory()
1748 region->guest_phys_addr, in vhost_set_memory()
1749 region->guest_phys_addr + in vhost_set_memory()
1750 region->memory_size - 1, in vhost_set_memory()
1751 region->userspace_addr, in vhost_set_memory()
1759 oldumem = d->umem; in vhost_set_memory()
1760 d->umem = newumem; in vhost_set_memory()
1763 for (i = 0; i < d->nvqs; ++i) { in vhost_set_memory()
1764 mutex_lock(&d->vqs[i]->mutex); in vhost_set_memory()
1765 d->vqs[i]->umem = newumem; in vhost_set_memory()
1766 mutex_unlock(&d->vqs[i]->mutex); in vhost_set_memory()
1776 return -EFAULT; in vhost_set_memory()
1787 if (vq->private_data) in vhost_vring_set_num()
1788 return -EBUSY; in vhost_vring_set_num()
1791 return -EFAULT; in vhost_vring_set_num()
1793 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) in vhost_vring_set_num()
1794 return -EINVAL; in vhost_vring_set_num()
1795 vq->num = s.num; in vhost_vring_set_num()
1807 return -EFAULT; in vhost_vring_set_addr()
1809 return -EOPNOTSUPP; in vhost_vring_set_addr()
1816 return -EFAULT; in vhost_vring_set_addr()
1819 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_set_addr()
1820 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_set_addr()
1821 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || in vhost_vring_set_addr()
1822 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || in vhost_vring_set_addr()
1823 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) in vhost_vring_set_addr()
1824 return -EINVAL; in vhost_vring_set_addr()
1829 if (vq->private_data) { in vhost_vring_set_addr()
1830 if (!vq_access_ok(vq, vq->num, in vhost_vring_set_addr()
1834 return -EINVAL; in vhost_vring_set_addr()
1837 if (!vq_log_used_access_ok(vq, vq->log_base, in vhost_vring_set_addr()
1840 return -EINVAL; in vhost_vring_set_addr()
1843 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_set_addr()
1844 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_set_addr()
1845 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_set_addr()
1846 vq->log_addr = a.log_guest_addr; in vhost_vring_set_addr()
1847 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_set_addr()
1859 mutex_lock(&vq->mutex); in vhost_vring_set_num_addr()
1872 mutex_unlock(&vq->mutex); in vhost_vring_set_num_addr()
1880 struct eventfd_ctx *ctx = NULL; in vhost_vring_ioctl() local
1896 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
1902 if (vq->private_data) { in vhost_vring_ioctl()
1903 r = -EBUSY; in vhost_vring_ioctl()
1907 r = -EFAULT; in vhost_vring_ioctl()
1911 vq->last_avail_idx = s.num & 0xffff; in vhost_vring_ioctl()
1912 vq->last_used_idx = (s.num >> 16) & 0xffff; in vhost_vring_ioctl()
1915 r = -EINVAL; in vhost_vring_ioctl()
1918 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
1921 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
1926 s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); in vhost_vring_ioctl()
1928 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
1930 r = -EFAULT; in vhost_vring_ioctl()
1934 r = -EFAULT; in vhost_vring_ioctl()
1942 if (eventfp != vq->kick) { in vhost_vring_ioctl()
1943 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
1944 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
1950 r = -EFAULT; in vhost_vring_ioctl()
1953 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); in vhost_vring_ioctl()
1954 if (IS_ERR(ctx)) { in vhost_vring_ioctl()
1955 r = PTR_ERR(ctx); in vhost_vring_ioctl()
1959 swap(ctx, vq->call_ctx.ctx); in vhost_vring_ioctl()
1963 r = -EFAULT; in vhost_vring_ioctl()
1966 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); in vhost_vring_ioctl()
1967 if (IS_ERR(ctx)) { in vhost_vring_ioctl()
1968 r = PTR_ERR(ctx); in vhost_vring_ioctl()
1971 swap(ctx, vq->error_ctx); in vhost_vring_ioctl()
1981 r = -EFAULT; in vhost_vring_ioctl()
1984 vq->busyloop_timeout = s.num; in vhost_vring_ioctl()
1988 s.num = vq->busyloop_timeout; in vhost_vring_ioctl()
1990 r = -EFAULT; in vhost_vring_ioctl()
1993 r = -ENOIOCTLCMD; in vhost_vring_ioctl()
1996 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
1997 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
1999 if (!IS_ERR_OR_NULL(ctx)) in vhost_vring_ioctl()
2000 eventfd_ctx_put(ctx); in vhost_vring_ioctl()
2004 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
2005 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
2007 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
2009 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2010 vhost_dev_flush(vq->poll.dev); in vhost_vring_ioctl()
2022 return -ENOMEM; in vhost_init_device_iotlb()
2024 oiotlb = d->iotlb; in vhost_init_device_iotlb()
2025 d->iotlb = niotlb; in vhost_init_device_iotlb()
2027 for (i = 0; i < d->nvqs; ++i) { in vhost_init_device_iotlb()
2028 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_init_device_iotlb()
2030 mutex_lock(&vq->mutex); in vhost_init_device_iotlb()
2031 vq->iotlb = niotlb; in vhost_init_device_iotlb()
2033 mutex_unlock(&vq->mutex); in vhost_init_device_iotlb()
2045 struct eventfd_ctx *ctx; in vhost_dev_ioctl() local
2067 r = -EFAULT; in vhost_dev_ioctl()
2071 r = -EFAULT; in vhost_dev_ioctl()
2074 for (i = 0; i < d->nvqs; ++i) { in vhost_dev_ioctl()
2077 vq = d->vqs[i]; in vhost_dev_ioctl()
2078 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
2080 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
2081 r = -EFAULT; in vhost_dev_ioctl()
2083 vq->log_base = base; in vhost_dev_ioctl()
2084 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
2091 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); in vhost_dev_ioctl()
2092 if (IS_ERR(ctx)) { in vhost_dev_ioctl()
2093 r = PTR_ERR(ctx); in vhost_dev_ioctl()
2096 swap(ctx, d->log_ctx); in vhost_dev_ioctl()
2097 for (i = 0; i < d->nvqs; ++i) { in vhost_dev_ioctl()
2098 mutex_lock(&d->vqs[i]->mutex); in vhost_dev_ioctl()
2099 d->vqs[i]->log_ctx = d->log_ctx; in vhost_dev_ioctl()
2100 mutex_unlock(&d->vqs[i]->mutex); in vhost_dev_ioctl()
2102 if (ctx) in vhost_dev_ioctl()
2103 eventfd_ctx_put(ctx); in vhost_dev_ioctl()
2106 r = -ENOIOCTLCMD; in vhost_dev_ioctl()
2116 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
2151 return -EFAULT; in log_write()
2157 write_length -= VHOST_PAGE_SIZE; in log_write()
2165 struct vhost_iotlb *umem = vq->umem; in log_write_hva()
2176 list_for_each_entry(u, &umem->list, link) { in log_write_hva()
2177 if (u->addr > hva - 1 + len || in log_write_hva()
2178 u->addr - 1 + u->size < hva) in log_write_hva()
2180 start = max(u->addr, hva); in log_write_hva()
2181 end = min(u->addr - 1 + u->size, hva - 1 + len); in log_write_hva()
2182 l = end - start + 1; in log_write_hva()
2183 r = log_write(vq->log_base, in log_write_hva()
2184 u->start + start - u->addr, in log_write_hva()
2193 return -EFAULT; in log_write_hva()
2195 len -= min; in log_write_hva()
2204 struct iovec *iov = vq->log_iov; in log_used()
2207 if (!vq->iotlb) in log_used()
2208 return log_write(vq->log_base, vq->log_addr + used_offset, len); in log_used()
2210 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, in log_used()
2233 if (vq->iotlb) { in vhost_log_write()
2245 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
2248 len -= l; in vhost_log_write()
2250 if (vq->log_ctx) in vhost_log_write()
2251 eventfd_signal(vq->log_ctx); in vhost_log_write()
2265 return -EFAULT; in vhost_update_used_flags()
2266 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
2270 used = &vq->used->flags; in vhost_update_used_flags()
2271 log_used(vq, (used - (void __user *)vq->used), in vhost_update_used_flags()
2272 sizeof vq->used->flags); in vhost_update_used_flags()
2273 if (vq->log_ctx) in vhost_update_used_flags()
2274 eventfd_signal(vq->log_ctx); in vhost_update_used_flags()
2282 return -EFAULT; in vhost_update_avail_event()
2283 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
2289 log_used(vq, (used - (void __user *)vq->used), in vhost_update_avail_event()
2291 if (vq->log_ctx) in vhost_update_avail_event()
2292 eventfd_signal(vq->log_ctx); in vhost_update_avail_event()
2301 bool is_le = vq->is_le; in vhost_vq_init_access()
2303 if (!vq->private_data) in vhost_vq_init_access()
2311 vq->signalled_used_valid = false; in vhost_vq_init_access()
2312 if (!vq->iotlb && in vhost_vq_init_access()
2313 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { in vhost_vq_init_access()
2314 r = -EFAULT; in vhost_vq_init_access()
2320 &vq->used->idx); in vhost_vq_init_access()
2323 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_vq_init_access()
2327 vq->is_le = is_le; in vhost_vq_init_access()
2336 struct vhost_dev *dev = vq->dev; in translate_desc()
2337 struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem; in translate_desc()
2339 u64 s = 0, last = addr + len - 1; in translate_desc()
2345 ret = -ENOBUFS; in translate_desc()
2350 if (map == NULL || map->start > addr) { in translate_desc()
2351 if (umem != dev->iotlb) { in translate_desc()
2352 ret = -EFAULT; in translate_desc()
2355 ret = -EAGAIN; in translate_desc()
2357 } else if (!(map->perm & access)) { in translate_desc()
2358 ret = -EPERM; in translate_desc()
2363 size = map->size - addr + map->start; in translate_desc()
2364 _iov->iov_len = min((u64)len - s, size); in translate_desc()
2365 _iov->iov_base = (void __user *)(unsigned long) in translate_desc()
2366 (map->addr + addr - map->start); in translate_desc()
2372 if (ret == -EAGAIN) in translate_desc()
2379 * or -1U if we're at the end. */
2385 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
2386 return -1U; in next_desc()
2389 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); in next_desc()
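
next_desc() (lines 2385-2389) returns the next link in a descriptor chain, or -1U at the tail; both get_indirect() and vhost_get_vq_desc() use it as their loop terminator. A condensed sketch of that walk, with the detail elided here left as a comment:

/* Chain walk driven by next_desc(); the real loops also bound the
 * iteration count by vq->num to catch loops in a hostile chain. */
i = head;
do {
	if (unlikely(vhost_get_desc(vq, &desc, i)))	/* fetch descriptor i */
		break;
	/* ... translate desc.addr / desc.len into iovec entries ... */
} while ((i = next_desc(vq, &desc)) != -1);
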
2401 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
2411 return -EINVAL; in get_indirect()
2414 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
2417 if (ret != -EAGAIN) in get_indirect()
2421 iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); in get_indirect()
2427 indirect->len); in get_indirect()
2428 return -E2BIG; in get_indirect()
2437 return -EINVAL; in get_indirect()
2441 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2442 return -EINVAL; in get_indirect()
2446 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2447 return -EINVAL; in get_indirect()
2457 iov_size - iov_count, access); in get_indirect()
2459 if (ret != -EAGAIN) in get_indirect()
2478 return -EINVAL; in get_indirect()
2482 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
2491 * This function returns the descriptor number found, or vq->num (which is
2507 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
2509 if (vq->avail_idx == vq->last_avail_idx) { in vhost_get_vq_desc()
2512 &vq->avail->idx); in vhost_get_vq_desc()
2513 return -EFAULT; in vhost_get_vq_desc()
2515 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_get_vq_desc()
2517 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { in vhost_get_vq_desc()
2519 last_avail_idx, vq->avail_idx); in vhost_get_vq_desc()
2520 return -EFAULT; in vhost_get_vq_desc()
2526 if (vq->avail_idx == last_avail_idx) in vhost_get_vq_desc()
2527 return vq->num; in vhost_get_vq_desc()
2540 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
2541 return -EFAULT; in vhost_get_vq_desc()
2547 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
2549 head, vq->num); in vhost_get_vq_desc()
2550 return -EINVAL; in vhost_get_vq_desc()
2561 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
2563 i, vq->num, head); in vhost_get_vq_desc()
2564 return -EINVAL; in vhost_get_vq_desc()
2566 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
2569 i, vq->num, head); in vhost_get_vq_desc()
2570 return -EINVAL; in vhost_get_vq_desc()
2575 i, vq->desc + i); in vhost_get_vq_desc()
2576 return -EFAULT; in vhost_get_vq_desc()
2583 if (ret != -EAGAIN) in vhost_get_vq_desc()
2597 iov_size - iov_count, access); in vhost_get_vq_desc()
2599 if (ret != -EAGAIN) in vhost_get_vq_desc()
2619 return -EINVAL; in vhost_get_vq_desc()
2623 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
2626 vq->last_avail_idx++; in vhost_get_vq_desc()
2630 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
2638 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
2663 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
2664 used = vq->used->ring + start; in __vhost_add_used_n()
2667 return -EFAULT; in __vhost_add_used_n()
2669 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
2673 log_used(vq, ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
2676 old = vq->last_used_idx; in __vhost_add_used_n()
2677 new = (vq->last_used_idx += count); in __vhost_add_used_n()
2682 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
2683 vq->signalled_used_valid = false; in __vhost_add_used_n()
2694 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
2695 n = vq->num - start; in vhost_add_used_n()
2701 count -= n; in vhost_add_used_n()
2709 return -EFAULT; in vhost_add_used_n()
2711 if (unlikely(vq->log_used)) { in vhost_add_used_n()
2716 sizeof vq->used->idx); in vhost_add_used_n()
2717 if (vq->log_ctx) in vhost_add_used_n()
2718 eventfd_signal(vq->log_ctx); in vhost_add_used_n()
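
vhost_add_used_n() (lines 2694 onward) handles wraparound by splitting one logically circular write into at most two linear __vhost_add_used_n() copies: the tail of the ring first, then the remainder from slot 0. A condensed sketch of that split, reconstructed under the same field names:

/* Sketch of the wrap handling: 'start' is where this batch begins,
 * 'n' is the room left before the ring's end. */
start = vq->last_used_idx & (vq->num - 1);
n = vq->num - start;
if (n < count) {
	r = __vhost_add_used_n(vq, heads, n);
	if (unlikely(r < 0))
		return r;
	heads += n;
	count -= n;
}
r = __vhost_add_used_n(vq, heads, count);
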
2735 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
2746 old = vq->signalled_used; in vhost_notify()
2747 v = vq->signalled_used_valid; in vhost_notify()
2748 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
2749 vq->signalled_used_valid = true; in vhost_notify()
2765 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) in vhost_signal()
2766 eventfd_signal(vq->call_ctx.ctx); in vhost_signal()
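
vhost_notify() implements the VIRTIO_RING_F_EVENT_IDX suppression rule behind the eventfd_signal() at line 2766: signal only when the guest's used_event falls inside the window of used indices published since the last signal. The comparison is the same one vring_need_event() performs in the virtio uapi headers; restated as a sketch, with all arithmetic deliberately mod 2^16:

/* Signal iff 'event' lies in the half-open window (old, new]. */
static inline bool sketch_need_event(u16 event, u16 new, u16 old)
{
	return (u16)(new - event - 1) < (u16)(new - old);
}
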
2780 /* multi-buffer version of vhost_add_used_and_signal */
2796 if (vq->avail_idx != vq->last_avail_idx) in vhost_vq_avail_empty()
2802 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_vq_avail_empty()
2804 return vq->avail_idx == vq->last_avail_idx; in vhost_vq_avail_empty()
2814 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
2816 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
2821 &vq->used->flags, r); in vhost_enable_notify()
2838 &vq->avail->idx, r); in vhost_enable_notify()
2841 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_enable_notify()
2843 return vq->avail_idx != vq->last_avail_idx; in vhost_enable_notify()
2852 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
2854 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
2859 &vq->used->flags, r); in vhost_disable_notify()
2872 node->vq = vq; in vhost_new_msg()
2873 node->msg.type = type; in vhost_new_msg()
2881 spin_lock(&dev->iotlb_lock); in vhost_enqueue_msg()
2882 list_add_tail(&node->node, head); in vhost_enqueue_msg()
2883 spin_unlock(&dev->iotlb_lock); in vhost_enqueue_msg()
2885 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); in vhost_enqueue_msg()
2894 spin_lock(&dev->iotlb_lock); in vhost_dequeue_msg()
2898 list_del(&node->node); in vhost_dequeue_msg()
2900 spin_unlock(&dev->iotlb_lock); in vhost_dequeue_msg()
2911 mutex_lock(&dev->mutex); in vhost_set_backend_features()
2912 for (i = 0; i < dev->nvqs; ++i) { in vhost_set_backend_features()
2913 vq = dev->vqs[i]; in vhost_set_backend_features()
2914 mutex_lock(&vq->mutex); in vhost_set_backend_features()
2915 vq->acked_backend_features = features; in vhost_set_backend_features()
2916 mutex_unlock(&vq->mutex); in vhost_set_backend_features()
2918 mutex_unlock(&dev->mutex); in vhost_set_backend_features()