Lines Matching +full:iommu +full:-map
Search hits from drivers/vdpa/vdpa_sim/vdpa_sim.c (the vDPA device simulator core); each hit shows its source line number, the matching text, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0-only
16 #include <linux/dma-map-ops.h>
32 MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");
58 struct vdpasim *vdpasim = mm_work->vdpasim; in vdpasim_mm_work_fn()
60 mm_work->ret = 0; in vdpasim_mm_work_fn()
63 vdpasim->mm_bound = mm_work->mm_to_bind; in vdpasim_mm_work_fn()
69 struct kthread_work *work = &mm_work->work; in vdpasim_worker_change_mm_sync()
72 kthread_queue_work(vdpasim->worker, work); in vdpasim_worker_change_mm_sync()
87 if (!vq->cb) in vdpasim_vq_notify()
90 vq->cb(vq->private); in vdpasim_vq_notify()
95 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
96 uint16_t last_avail_idx = vq->vring.last_avail_idx; in vdpasim_queue_ready()
98 (uintptr_t)vq->desc_addr; in vdpasim_queue_ready()
100 (uintptr_t)vq->driver_addr; in vdpasim_queue_ready()
102 (uintptr_t)vq->device_addr; in vdpasim_queue_ready()
104 if (use_va && vdpasim->mm_bound) { in vdpasim_queue_ready()
105 vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num, in vdpasim_queue_ready()
108 vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, in vdpasim_queue_ready()
112 vq->vring.last_avail_idx = last_avail_idx; in vdpasim_queue_ready()
117 * the same at vq start. This is how vhost-user works in a VHOST_SET_VRING_BASE call. in vdpasim_queue_ready()
123 vq->vring.last_used_idx = last_avail_idx; in vdpasim_queue_ready()
124 vq->vring.notify = vdpasim_vq_notify; in vdpasim_queue_ready()
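
Taken together, the vdpasim_queue_ready() hits above show the simulator picking between two vring accessors: when the device runs in VA mode (use_va) and a userspace mm is bound, ring entries hold user virtual addresses and go through vringh_init_iotlb_va(); otherwise they hold IOVAs translated by the per-address-space iotlb. A minimal sketch assembled from those fragments follows; the weak-barriers argument (true) is an assumption, since it is not visible in the hits.

/*
 * Sketch, not verbatim driver code: re-initialize the vring accessors,
 * then restore the saved avail index and mirror it into the used index
 * so no inflight descriptors are implied at vq start.
 */
static void queue_ready_sketch(struct vdpasim *vdpasim,
			       struct vdpasim_virtqueue *vq)
{
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	struct vring_desc *desc = (struct vring_desc *)(uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)(uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)(uintptr_t)vq->device_addr;

	if (use_va && vdpasim->mm_bound)
		/* Ring entries are userspace VAs; access via the bound mm. */
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	else
		/* Ring entries are IOVAs; translate through the iotlb. */
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);

	vq->vring.last_avail_idx = last_avail_idx;
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}
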
130 vq->ready = false; in vdpasim_vq_reset()
131 vq->desc_addr = 0; in vdpasim_vq_reset()
132 vq->driver_addr = 0; in vdpasim_vq_reset()
133 vq->device_addr = 0; in vdpasim_vq_reset()
134 vq->cb = NULL; in vdpasim_vq_reset()
135 vq->private = NULL; in vdpasim_vq_reset()
136 vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features, in vdpasim_vq_reset()
139 vq->vring.notify = NULL; in vdpasim_vq_reset()
146 spin_lock(&vdpasim->iommu_lock); in vdpasim_do_reset()
148 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_do_reset()
149 vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]); in vdpasim_do_reset()
150 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_do_reset()
151 &vdpasim->iommu_lock); in vdpasim_do_reset()
155 for (i = 0; i < vdpasim->dev_attr.nas; i++) { in vdpasim_do_reset()
156 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_do_reset()
157 vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, in vdpasim_do_reset()
159 vdpasim->iommu_pt[i] = true; in vdpasim_do_reset()
163 vdpasim->running = true; in vdpasim_do_reset()
164 spin_unlock(&vdpasim->iommu_lock); in vdpasim_do_reset()
166 vdpasim->features = 0; in vdpasim_do_reset()
167 vdpasim->status = 0; in vdpasim_do_reset()
168 ++vdpasim->generation; in vdpasim_do_reset()
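
The vdpasim_do_reset() hits show two steps: every virtqueue is reset and rebound to iommu[0], then every address space is returned to 1:1 passthrough by installing a single identity range over the whole IOVA space. A sketch of that second step, assuming the elided permission argument at hit 157 is VHOST_MAP_RW:

/* Put one address space back into passthrough: IOVA == PA until the
 * first explicit mapping replaces the identity range.
 */
static void reset_as_to_passthrough(struct vdpasim *vdpasim, unsigned int asid)
{
	vhost_iotlb_reset(&vdpasim->iommu[asid]);
	vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
			      0, VHOST_MAP_RW);
	vdpasim->iommu_pt[asid] = true;
}
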
177 struct mm_struct *mm = vdpasim->mm_bound; in vdpasim_work_fn()
185 vdpasim->dev_attr.work_fn(vdpasim); in vdpasim_work_fn()
200 int i, ret = -ENOMEM; in vdpasim_create()
202 if (!dev_attr->alloc_size) in vdpasim_create()
203 return ERR_PTR(-EINVAL); in vdpasim_create()
205 if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { in vdpasim_create()
206 if (config->device_features & in vdpasim_create()
207 ~dev_attr->supported_features) in vdpasim_create()
208 return ERR_PTR(-EINVAL); in vdpasim_create()
209 dev_attr->supported_features = in vdpasim_create()
210 config->device_features; in vdpasim_create()
219 dev_attr->ngroups, dev_attr->nas, in vdpasim_create()
220 dev_attr->alloc_size, in vdpasim_create()
221 dev_attr->name, use_va); in vdpasim_create()
228 vdpasim->dev_attr = *dev_attr; in vdpasim_create()
229 dev = &vdpasim->vdpa.dev; in vdpasim_create()
231 kthread_init_work(&vdpasim->work, vdpasim_work_fn); in vdpasim_create()
232 vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s", in vdpasim_create()
233 dev_attr->name); in vdpasim_create()
234 if (IS_ERR(vdpasim->worker)) in vdpasim_create()
237 mutex_init(&vdpasim->mutex); in vdpasim_create()
238 spin_lock_init(&vdpasim->iommu_lock); in vdpasim_create()
240 dev->dma_mask = &dev->coherent_dma_mask; in vdpasim_create()
243 vdpasim->vdpa.mdev = dev_attr->mgmt_dev; in vdpasim_create()
245 vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL); in vdpasim_create()
246 if (!vdpasim->config) in vdpasim_create()
249 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
251 if (!vdpasim->vqs) in vdpasim_create()
254 vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
255 sizeof(*vdpasim->iommu), GFP_KERNEL); in vdpasim_create()
256 if (!vdpasim->iommu) in vdpasim_create()
259 vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
260 sizeof(*vdpasim->iommu_pt), GFP_KERNEL); in vdpasim_create()
261 if (!vdpasim->iommu_pt) in vdpasim_create()
264 for (i = 0; i < vdpasim->dev_attr.nas; i++) { in vdpasim_create()
265 vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0); in vdpasim_create()
266 vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0, in vdpasim_create()
268 vdpasim->iommu_pt[i] = true; in vdpasim_create()
271 for (i = 0; i < dev_attr->nvqs; i++) in vdpasim_create()
272 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_create()
273 &vdpasim->iommu_lock); in vdpasim_create()
275 vdpasim->vdpa.dma_dev = dev; in vdpasim_create()
288 kthread_queue_work(vdpasim->worker, &vdpasim->work); in vdpasim_schedule_work()
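
vdpasim_create() and vdpasim_schedule_work() together show the datapath threading model: one dedicated kthread worker per device, with a single work item that runs dev_attr.work_fn while the device is running. A condensed lifecycle sketch, assembled from the hits here and from the teardown calls that appear later in vdpasim_free():

/* Condensed lifecycle sketch of the per-device worker. */
kthread_init_work(&vdpasim->work, vdpasim_work_fn);	/* bind the callback */
vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
					dev_attr->name);	/* spawn the kthread */

kthread_queue_work(vdpasim->worker, &vdpasim->work);	/* kick the datapath */

kthread_cancel_work_sync(&vdpasim->work);		/* drain on free */
kthread_destroy_worker(vdpasim->worker);
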
297 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
299 vq->desc_addr = desc_area; in vdpasim_set_vq_address()
300 vq->driver_addr = driver_area; in vdpasim_set_vq_address()
301 vq->device_addr = device_area; in vdpasim_set_vq_address()
309 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_num()
311 vq->num = num; in vdpasim_set_vq_num()
317 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_kick_vq()
319 if (!vdpasim->running && in vdpasim_kick_vq()
320 (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) { in vdpasim_kick_vq()
321 vdpasim->pending_kick = true; in vdpasim_kick_vq()
325 if (vq->ready) in vdpasim_kick_vq()
333 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_cb()
335 vq->cb = cb->callback; in vdpasim_set_vq_cb()
336 vq->private = cb->private; in vdpasim_set_vq_cb()
342 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_ready()
345 mutex_lock(&vdpasim->mutex); in vdpasim_set_vq_ready()
346 old_ready = vq->ready; in vdpasim_set_vq_ready()
347 vq->ready = ready; in vdpasim_set_vq_ready()
348 if (vq->ready && !old_ready) { in vdpasim_set_vq_ready()
351 mutex_unlock(&vdpasim->mutex); in vdpasim_set_vq_ready()
357 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_ready()
359 return vq->ready; in vdpasim_get_vq_ready()
366 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_state()
367 struct vringh *vrh = &vq->vring; in vdpasim_set_vq_state()
369 mutex_lock(&vdpasim->mutex); in vdpasim_set_vq_state()
370 vrh->last_avail_idx = state->split.avail_index; in vdpasim_set_vq_state()
371 mutex_unlock(&vdpasim->mutex); in vdpasim_set_vq_state()
380 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_state()
381 struct vringh *vrh = &vq->vring; in vdpasim_get_vq_state()
383 state->split.avail_index = vrh->last_avail_idx; in vdpasim_get_vq_state()
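
Per the hits above, virtqueue state save/restore is just the split ring's avail index: vdpasim_set_vq_state() writes it into the vringh under the device mutex, and vdpasim_get_vq_state() reads it back. last_used_idx is deliberately not part of the state; it is re-derived from last_avail_idx in vdpasim_queue_ready() (see the comment at hit 117). A sketch of the pair:

/* Sketch: split-ring state is a single 16-bit avail index. */
static void vq_state_sketch(struct vdpasim *vdpasim, u16 idx,
			    struct vdpa_vq_state *state, bool set)
{
	struct vringh *vrh = &vdpasim->vqs[idx].vring;

	if (set) {
		mutex_lock(&vdpasim->mutex);
		vrh->last_avail_idx = state->split.avail_index;
		mutex_unlock(&vdpasim->mutex);
	} else {
		state->split.avail_index = vrh->last_avail_idx;
	}
}
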
393 if (vdpasim->dev_attr.get_stats) in vdpasim_get_vq_stats()
394 return vdpasim->dev_attr.get_stats(vdpasim, idx, in vdpasim_get_vq_stats()
396 return -EOPNOTSUPP; in vdpasim_get_vq_stats()
417 return vdpasim->dev_attr.supported_features; in vdpasim_get_device_features()
431 return -EINVAL; in vdpasim_set_driver_features()
433 vdpasim->features = features & vdpasim->dev_attr.supported_features; in vdpasim_set_driver_features()
442 return vdpasim->features; in vdpasim_get_driver_features()
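
vdpasim_set_driver_features() masks the negotiated bits against the provisioned feature set. The check feeding the -EINVAL at hit 431 is elided from the listing; in mainline it requires the driver to accept VIRTIO_F_ACCESS_PLATFORM, since every buffer access is translated through the simulator's iotlb. A hedged sketch:

/* Sketch; the ACCESS_PLATFORM check is an assumption about the elided line. */
static int set_driver_features_sketch(struct vdpasim *vdpasim, u64 features)
{
	/* All DMA is remapped, so platform access must be negotiated. */
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;
	return 0;
}
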
460 return vdpasim->dev_attr.id; in vdpasim_get_device_id()
473 mutex_lock(&vdpasim->mutex); in vdpasim_get_status()
474 status = vdpasim->status; in vdpasim_get_status()
475 mutex_unlock(&vdpasim->mutex); in vdpasim_get_status()
484 mutex_lock(&vdpasim->mutex); in vdpasim_set_status()
485 vdpasim->status = status; in vdpasim_set_status()
486 mutex_unlock(&vdpasim->mutex); in vdpasim_set_status()
493 mutex_lock(&vdpasim->mutex); in vdpasim_compat_reset()
494 vdpasim->status = 0; in vdpasim_compat_reset()
496 mutex_unlock(&vdpasim->mutex); in vdpasim_compat_reset()
510 mutex_lock(&vdpasim->mutex); in vdpasim_suspend()
511 vdpasim->running = false; in vdpasim_suspend()
512 mutex_unlock(&vdpasim->mutex); in vdpasim_suspend()
522 mutex_lock(&vdpasim->mutex); in vdpasim_resume()
523 vdpasim->running = true; in vdpasim_resume()
525 if (vdpasim->pending_kick) { in vdpasim_resume()
527 for (i = 0; i < vdpasim->dev_attr.nvqs; ++i) in vdpasim_resume()
530 vdpasim->pending_kick = false; in vdpasim_resume()
533 mutex_unlock(&vdpasim->mutex); in vdpasim_resume()
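
The suspend/resume hits describe a deferred-kick handshake: vdpasim_kick_vq() refuses to schedule work while the device is suspended but DRIVER_OK, recording pending_kick instead, and vdpasim_resume() replays one kick per queue before clearing the flag. Sketch of the kick side; the elided body after hit 325 is assumed to be vdpasim_schedule_work():

/* Sketch of the kick side of the handshake. */
static void kick_vq_sketch(struct vdpasim *vdpasim, u16 idx)
{
	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;	/* replayed by resume */
		return;
	}

	if (vdpasim->vqs[idx].ready)
		vdpasim_schedule_work(vdpasim);
}
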
542 return vdpasim->dev_attr.config_size; in vdpasim_get_config_size()
550 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_get_config()
553 if (vdpasim->dev_attr.get_config) in vdpasim_get_config()
554 vdpasim->dev_attr.get_config(vdpasim, vdpasim->config); in vdpasim_get_config()
556 memcpy(buf, vdpasim->config + offset, len); in vdpasim_get_config()
564 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_set_config()
567 memcpy(vdpasim->config + offset, buf, len); in vdpasim_set_config()
569 if (vdpasim->dev_attr.set_config) in vdpasim_set_config()
570 vdpasim->dev_attr.set_config(vdpasim, vdpasim->config); in vdpasim_set_config()
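
Config space access is a guarded window over the kzalloc'ed buffer from vdpasim_create(): both directions bounds-check offset + len against config_size, and the optional get_config/set_config hooks let a device backend refresh or react to the shared buffer. Sketch of the read side:

/* Sketch of the guarded config read visible above. */
static void get_config_sketch(struct vdpasim *vdpasim, unsigned int offset,
			      void *buf, unsigned int len)
{
	if (offset + len > vdpasim->dev_attr.config_size)
		return;	/* out-of-range reads are silently ignored */

	if (vdpasim->dev_attr.get_config)	/* let the backend refresh */
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}
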
577 return vdpasim->generation; in vdpasim_get_generation()
594 struct vhost_iotlb *iommu; in vdpasim_set_group_asid() local
597 if (group > vdpasim->dev_attr.ngroups) in vdpasim_set_group_asid()
598 return -EINVAL; in vdpasim_set_group_asid()
600 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_group_asid()
601 return -EINVAL; in vdpasim_set_group_asid()
603 iommu = &vdpasim->iommu[asid]; in vdpasim_set_group_asid()
605 mutex_lock(&vdpasim->mutex); in vdpasim_set_group_asid()
607 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) in vdpasim_set_group_asid()
609 vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu, in vdpasim_set_group_asid()
610 &vdpasim->iommu_lock); in vdpasim_set_group_asid()
612 mutex_unlock(&vdpasim->mutex); in vdpasim_set_group_asid()
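
vdpasim_set_group_asid() validates both indices and then repoints the vrings of that group at the iotlb of the chosen address space. The line elided between hits 607 and 609 is assumed to filter virtqueues by group membership, via something like vdpasim_get_vq_group():

/* Sketch; vdpasim_get_vq_group() as the membership test is an assumption. */
static int set_group_asid_sketch(struct vdpasim *vdpasim,
				 unsigned int group, unsigned int asid)
{
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups || asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);
	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(&vdpasim->vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);
	mutex_unlock(&vdpasim->mutex);

	return 0;
}
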
621 struct vhost_iotlb_map *map; in vdpasim_set_map() local
622 struct vhost_iotlb *iommu; in vdpasim_set_map() local
623 u64 start = 0ULL, last = 0ULL - 1; in vdpasim_set_map()
626 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_map()
627 return -EINVAL; in vdpasim_set_map()
629 spin_lock(&vdpasim->iommu_lock); in vdpasim_set_map()
631 iommu = &vdpasim->iommu[asid]; in vdpasim_set_map()
632 vhost_iotlb_reset(iommu); in vdpasim_set_map()
633 vdpasim->iommu_pt[asid] = false; in vdpasim_set_map()
635 for (map = vhost_iotlb_itree_first(iotlb, start, last); map; in vdpasim_set_map()
636 map = vhost_iotlb_itree_next(map, start, last)) { in vdpasim_set_map()
637 ret = vhost_iotlb_add_range(iommu, map->start, in vdpasim_set_map()
638 map->last, map->addr, map->perm); in vdpasim_set_map()
642 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
646 vhost_iotlb_reset(iommu); in vdpasim_set_map()
647 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
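
vdpasim_set_map() swaps in a whole new translation for one address space: clear the iotlb, walk every range of the caller's table across the full 64-bit IOVA span (0ULL - 1 wraps to ~0ULL, i.e. U64_MAX), copy each one, and on any failure reset again so the table is never left half-populated. The pattern, distilled:

/* Sketch of the copy-with-rollback pattern above. */
static int copy_iotlb(struct vhost_iotlb *dst, struct vhost_iotlb *src)
{
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;	/* whole 64-bit IOVA space */
	int ret = 0;

	vhost_iotlb_reset(dst);
	for (map = vhost_iotlb_itree_first(src, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(dst, map->start, map->last,
					    map->addr, map->perm);
		if (ret) {
			vhost_iotlb_reset(dst);	/* roll back to empty */
			break;
		}
	}
	return ret;
}
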
655 if (asid >= vdpasim->dev_attr.nas) in vdpasim_reset_map()
656 return -EINVAL; in vdpasim_reset_map()
658 spin_lock(&vdpasim->iommu_lock); in vdpasim_reset_map()
659 if (vdpasim->iommu_pt[asid]) in vdpasim_reset_map()
661 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_reset_map()
662 vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX, in vdpasim_reset_map()
664 vdpasim->iommu_pt[asid] = true; in vdpasim_reset_map()
666 spin_unlock(&vdpasim->iommu_lock); in vdpasim_reset_map()
701 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_map()
702 return -EINVAL; in vdpasim_dma_map()
704 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_map()
705 if (vdpasim->iommu_pt[asid]) { in vdpasim_dma_map()
706 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_dma_map()
707 vdpasim->iommu_pt[asid] = false; in vdpasim_dma_map()
709 ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova, in vdpasim_dma_map()
710 iova + size - 1, pa, perm, opaque); in vdpasim_dma_map()
711 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_map()
721 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_unmap()
722 return -EINVAL; in vdpasim_dma_unmap()
724 if (vdpasim->iommu_pt[asid]) { in vdpasim_dma_unmap()
725 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_dma_unmap()
726 vdpasim->iommu_pt[asid] = false; in vdpasim_dma_unmap()
729 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
730 vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1); in vdpasim_dma_unmap()
731 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
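
Both vdpasim_dma_map() and vdpasim_dma_unmap() leave passthrough mode before touching individual ranges: the identity range installed at reset is dropped and iommu_pt cleared on the first explicit operation. Note the asymmetry visible above: dma_map clears passthrough inside iommu_lock, while dma_unmap clears it before taking the lock. Sketch of the map side:

/* Sketch: exit passthrough, then install one explicit range. */
static int dma_map_sketch(struct vdpasim *vdpasim, unsigned int asid,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		/* Drop the identity range installed at reset. */
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}
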
741 kthread_cancel_work_sync(&vdpasim->work); in vdpasim_free()
742 kthread_destroy_worker(vdpasim->worker); in vdpasim_free()
744 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_free()
745 vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov); in vdpasim_free()
746 vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); in vdpasim_free()
749 vdpasim->dev_attr.free(vdpasim); in vdpasim_free()
751 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_free()
752 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_free()
753 kfree(vdpasim->iommu); in vdpasim_free()
754 kfree(vdpasim->iommu_pt); in vdpasim_free()
755 kfree(vdpasim->vqs); in vdpasim_free()
756 kfree(vdpasim->config); in vdpasim_free()