// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>

#include "vdpa_sim.h"

#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC "vDPA Device Simulator core"
#define DRV_LICENSE "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
                 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

struct vdpasim_mm_work {
        struct kthread_work work;
        struct vdpasim *vdpasim;
        struct mm_struct *mm_to_bind;
        int ret;
};

static void vdpasim_mm_work_fn(struct kthread_work *work)
{
        struct vdpasim_mm_work *mm_work =
                container_of(work, struct vdpasim_mm_work, work);
        struct vdpasim *vdpasim = mm_work->vdpasim;

        mm_work->ret = 0;

        //TODO: should we attach the cgroup of the mm owner?
        vdpasim->mm_bound = mm_work->mm_to_bind;
}

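/*
 * Queue an mm-change request on the device worker and wait for it to
 * complete, so the new binding is visible to the worker before returning.
 */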
static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
                                          struct vdpasim_mm_work *mm_work)
{
        struct kthread_work *work = &mm_work->work;

        kthread_init_work(work, vdpasim_mm_work_fn);
        kthread_queue_work(vdpasim->worker, work);

        kthread_flush_work(work);
}

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
        struct vdpasim_virtqueue *vq =
                container_of(vring, struct vdpasim_virtqueue, vring);

        if (!vq->cb)
                return;

        vq->cb(vq->private);
}

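/*
 * (Re)initialize the vringh for a queue from the addresses programmed by
 * the driver. When userspace VA is in use and an mm is bound, translations
 * go through the VA variant of the IOTLB vringh helpers.
 */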
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        uint16_t last_avail_idx = vq->vring.last_avail_idx;
        struct vring_desc *desc = (struct vring_desc *)
                (uintptr_t)vq->desc_addr;
        struct vring_avail *avail = (struct vring_avail *)
                (uintptr_t)vq->driver_addr;
        struct vring_used *used = (struct vring_used *)
                (uintptr_t)vq->device_addr;

        if (use_va && vdpasim->mm_bound) {
                vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
                                     true, desc, avail, used);
        } else {
                vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
                                  true, desc, avail, used);
        }

        vq->vring.last_avail_idx = last_avail_idx;

        /*
         * Since vdpa_sim does not support receiving inflight descriptors as
         * the destination of a migration, set both avail_idx and used_idx to
         * the same value at vq start. This is how vhost-user behaves in a
         * VHOST_SET_VRING_BASE call.
         *
         * Although the simple fix is to set last_used_idx at
         * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
         */
        vq->vring.last_used_idx = last_avail_idx;
        vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
                             struct vdpasim_virtqueue *vq)
{
        vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
        vq->cb = NULL;
        vq->private = NULL;
        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

        vq->vring.notify = NULL;
}

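/*
 * Reset all virtqueues and point them back at address space 0. When
 * VDPA_RESET_F_CLEAN_MAP is set, also drop every mapping and restore the
 * 1:1 passthrough IOTLB for each address space.
 */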
static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
{
        int i;

        spin_lock(&vdpasim->iommu_lock);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);
        }

        if (flags & VDPA_RESET_F_CLEAN_MAP) {
                for (i = 0; i < vdpasim->dev_attr.nas; i++) {
                        vhost_iotlb_reset(&vdpasim->iommu[i]);
                        vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
                                              0, VHOST_MAP_RW);
                        vdpasim->iommu_pt[i] = true;
                }
        }

        vdpasim->running = true;
        spin_unlock(&vdpasim->iommu_lock);

        vdpasim->features = 0;
        vdpasim->status = 0;
        ++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

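/*
 * Worker entry point: run the device-specific work function. In VA mode,
 * temporarily adopt the bound mm so the kthread can access userspace
 * memory through it.
 */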
static void vdpasim_work_fn(struct kthread_work *work)
{
        struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
        struct mm_struct *mm = vdpasim->mm_bound;

        if (use_va && mm) {
                if (!mmget_not_zero(mm))
                        return;
                kthread_use_mm(mm);
        }

        vdpasim->dev_attr.work_fn(vdpasim);

        if (use_va && mm) {
                kthread_unuse_mm(mm);
                mmput(mm);
        }
}

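/**
 * vdpasim_create - create a simulated vDPA device
 * @dev_attr: device-specific attributes (sizes, callbacks, features)
 * @config: initial configuration coming from the vdpa management interface
 *
 * Allocates the vdpa device, the worker kthread, the device config space,
 * the virtqueues and one IOTLB per address space, each starting as a 1:1
 * passthrough mapping.
 *
 * Return: the new vdpasim on success, an ERR_PTR() otherwise.
 */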
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
                               const struct vdpa_dev_set_config *config)
{
        const struct vdpa_config_ops *ops;
        struct vdpa_device *vdpa;
        struct vdpasim *vdpasim;
        struct device *dev;
        int i, ret = -ENOMEM;

        if (!dev_attr->alloc_size)
                return ERR_PTR(-EINVAL);

        if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
                if (config->device_features &
                    ~dev_attr->supported_features)
                        return ERR_PTR(-EINVAL);
                dev_attr->supported_features =
                        config->device_features;
        }

        if (batch_mapping)
                ops = &vdpasim_batch_config_ops;
        else
                ops = &vdpasim_config_ops;

        vdpa = __vdpa_alloc_device(NULL, ops,
                                   dev_attr->ngroups, dev_attr->nas,
                                   dev_attr->alloc_size,
                                   dev_attr->name, use_va);
        if (IS_ERR(vdpa)) {
                ret = PTR_ERR(vdpa);
                goto err_alloc;
        }

        vdpasim = vdpa_to_sim(vdpa);
        vdpasim->dev_attr = *dev_attr;
        dev = &vdpasim->vdpa.dev;

        kthread_init_work(&vdpasim->work, vdpasim_work_fn);
        vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
                                                dev_attr->name);
        if (IS_ERR(vdpasim->worker))
                goto err_iommu;

        mutex_init(&vdpasim->mutex);
        spin_lock_init(&vdpasim->iommu_lock);

        dev->dma_mask = &dev->coherent_dma_mask;
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                goto err_iommu;
        vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

        vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
        if (!vdpasim->config)
                goto err_iommu;

        vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
                               GFP_KERNEL);
        if (!vdpasim->vqs)
                goto err_iommu;

        vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
                                       sizeof(*vdpasim->iommu), GFP_KERNEL);
        if (!vdpasim->iommu)
                goto err_iommu;

        vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
                                          sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
        if (!vdpasim->iommu_pt)
                goto err_iommu;

        for (i = 0; i < vdpasim->dev_attr.nas; i++) {
                vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
                vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
                                      VHOST_MAP_RW);
                vdpasim->iommu_pt[i] = true;
        }

        for (i = 0; i < dev_attr->nvqs; i++)
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);

        vdpasim->vdpa.dma_dev = dev;

        return vdpasim;

err_iommu:
        put_device(dev);
err_alloc:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
        kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->desc_addr = desc_area;
        vq->driver_addr = driver_area;
        vq->device_addr = device_area;

        return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->num = num;
}

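/*
 * A kick received while the device is suspended (but DRIVER_OK is set) is
 * only recorded in pending_kick; vdpasim_resume() replays it later.
 */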
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (!vdpasim->running &&
            (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                vdpasim->pending_kick = true;
                return;
        }

        if (vq->ready)
                vdpasim_schedule_work(vdpasim);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
                              struct vdpa_callback *cb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->cb = cb->callback;
        vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        bool old_ready;

        mutex_lock(&vdpasim->mutex);
        old_ready = vq->ready;
        vq->ready = ready;
        if (vq->ready && !old_ready) {
                vdpasim_queue_ready(vdpasim, idx);
        }
        mutex_unlock(&vdpasim->mutex);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
                                const struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        mutex_lock(&vdpasim->mutex);
        vrh->last_avail_idx = state->split.avail_index;
        mutex_unlock(&vdpasim->mutex);

        return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        state->split.avail_index = vrh->last_avail_idx;
        return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
                                struct sk_buff *msg,
                                struct netlink_ext_ack *extack)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (vdpasim->dev_attr.get_stats)
                return vdpasim->dev_attr.get_stats(vdpasim, idx,
                                                   msg, extack);
        return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
        /* RX and TX belong to group 0, CVQ belongs to group 1 */
        if (idx == 2)
                return 1;
        else
                return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.supported_features;
}

static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
        return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* DMA mapping must be done by driver */
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;

        vdpasim->features = features & vdpasim->dev_attr.supported_features;

        return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
        return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        u8 status;

        mutex_lock(&vdpasim->mutex);
        status = vdpasim->status;
        mutex_unlock(&vdpasim->mutex);

        return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        mutex_lock(&vdpasim->mutex);
        vdpasim->status = status;
        mutex_unlock(&vdpasim->mutex);
}

static int vdpasim_compat_reset(struct vdpa_device *vdpa, u32 flags)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        mutex_lock(&vdpasim->mutex);
        vdpasim->status = 0;
        vdpasim_do_reset(vdpasim, flags);
        mutex_unlock(&vdpasim->mutex);

        return 0;
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
        return vdpasim_compat_reset(vdpa, 0);
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        mutex_lock(&vdpasim->mutex);
        vdpasim->running = false;
        mutex_unlock(&vdpasim->mutex);

        return 0;
}

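/*
 * Resume processing and replay any kick that arrived while the device was
 * suspended, so descriptors queued in the meantime are not lost.
 */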
static int vdpasim_resume(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int i;

        mutex_lock(&vdpasim->mutex);
        vdpasim->running = true;

        if (vdpasim->pending_kick) {
                /* Process pending descriptors */
                for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
                        vdpasim_kick_vq(vdpa, i);

                vdpasim->pending_kick = false;
        }

        mutex_unlock(&vdpasim->mutex);

        return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        if (vdpasim->dev_attr.get_config)
                vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

        memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                               const void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        memcpy(vdpasim->config + offset, buf, len);

        if (vdpasim->dev_attr.set_config)
                vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
        struct vdpa_iova_range range = {
                .first = 0ULL,
                .last = ULLONG_MAX,
        };

        return range;
}

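/*
 * Attach all virtqueues of a group to the IOTLB of the given address space
 * ID, so their descriptor translations use that address space from now on.
 */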
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
                                  unsigned int asid)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb *iommu;
        int i;

        if (group > vdpasim->dev_attr.ngroups)
                return -EINVAL;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        iommu = &vdpasim->iommu[asid];

        mutex_lock(&vdpasim->mutex);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                if (vdpasim_get_vq_group(vdpa, i) == group)
                        vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
                                         &vdpasim->iommu_lock);

        mutex_unlock(&vdpasim->mutex);

        return 0;
}

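/*
 * Batched mapping update: replace the whole IOTLB of an address space with
 * the translations provided by the caller. On failure the IOTLB is left
 * empty rather than half-populated.
 */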
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
                           struct vhost_iotlb *iotlb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
        struct vhost_iotlb *iommu;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);

        iommu = &vdpasim->iommu[asid];
        vhost_iotlb_reset(iommu);
        vdpasim->iommu_pt[asid] = false;

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                ret = vhost_iotlb_add_range(iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
        }
        spin_unlock(&vdpasim->iommu_lock);
        return 0;

err:
        vhost_iotlb_reset(iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
}

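/*
 * Drop all mappings of an address space and restore the default 1:1
 * passthrough IOTLB; a no-op if the address space is already passthrough.
 */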
static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        if (vdpasim->iommu_pt[asid])
                goto out;
        vhost_iotlb_reset(&vdpasim->iommu[asid]);
        vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
                              0, VHOST_MAP_RW);
        vdpasim->iommu_pt[asid] = true;
out:
        spin_unlock(&vdpasim->iommu_lock);
        return 0;
}

static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_mm_work mm_work;

        mm_work.vdpasim = vdpasim;
        mm_work.mm_to_bind = mm;

        vdpasim_worker_change_mm_sync(vdpasim, &mm_work);

        return mm_work.ret;
}

static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_mm_work mm_work;

        mm_work.vdpasim = vdpasim;
        mm_work.mm_to_bind = NULL;

        vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}

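/*
 * Incremental mapping update: the first explicit mapping in an address
 * space drops the 1:1 passthrough table before the new range is inserted.
 */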
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
                           u64 iova, u64 size,
                           u64 pa, u32 perm, void *opaque)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        if (vdpasim->iommu_pt[asid]) {
                vhost_iotlb_reset(&vdpasim->iommu[asid]);
                vdpasim->iommu_pt[asid] = false;
        }
        ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
                                        iova + size - 1, pa, perm, opaque);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
                             u64 iova, u64 size)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        if (vdpasim->iommu_pt[asid]) {
                vhost_iotlb_reset(&vdpasim->iommu[asid]);
                vdpasim->iommu_pt[asid] = false;
        }

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        return 0;
}

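/*
 * Release everything vdpasim_create() set up: cancel pending work, destroy
 * the worker, clean up the per-vq kiovs, run the device-specific free
 * callback and free the IOTLBs, virtqueues and config space.
 */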
static void vdpasim_free(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int i;

        kthread_cancel_work_sync(&vdpasim->work);
        kthread_destroy_worker(vdpasim->worker);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
                vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
        }

        vdpasim->dev_attr.free(vdpasim);

        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_reset(&vdpasim->iommu[i]);
        kfree(vdpasim->iommu);
        kfree(vdpasim->iommu_pt);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_vq_address = vdpasim_set_vq_address,
        .set_vq_num = vdpasim_set_vq_num,
        .kick_vq = vdpasim_kick_vq,
        .set_vq_cb = vdpasim_set_vq_cb,
        .set_vq_ready = vdpasim_set_vq_ready,
        .get_vq_ready = vdpasim_get_vq_ready,
        .set_vq_state = vdpasim_set_vq_state,
        .get_vendor_vq_stats = vdpasim_get_vq_stats,
        .get_vq_state = vdpasim_get_vq_state,
        .get_vq_align = vdpasim_get_vq_align,
        .get_vq_group = vdpasim_get_vq_group,
        .get_device_features = vdpasim_get_device_features,
        .get_backend_features = vdpasim_get_backend_features,
        .set_driver_features = vdpasim_set_driver_features,
        .get_driver_features = vdpasim_get_driver_features,
        .set_config_cb = vdpasim_set_config_cb,
        .get_vq_num_max = vdpasim_get_vq_num_max,
        .get_device_id = vdpasim_get_device_id,
        .get_vendor_id = vdpasim_get_vendor_id,
        .get_status = vdpasim_get_status,
        .set_status = vdpasim_set_status,
        .reset = vdpasim_reset,
        .compat_reset = vdpasim_compat_reset,
        .suspend = vdpasim_suspend,
        .resume = vdpasim_resume,
        .get_config_size = vdpasim_get_config_size,
        .get_config = vdpasim_get_config,
        .set_config = vdpasim_set_config,
        .get_generation = vdpasim_get_generation,
        .get_iova_range = vdpasim_get_iova_range,
        .set_group_asid = vdpasim_set_group_asid,
        .dma_map = vdpasim_dma_map,
        .dma_unmap = vdpasim_dma_unmap,
        .reset_map = vdpasim_reset_map,
        .bind_mm = vdpasim_bind_mm,
        .unbind_mm = vdpasim_unbind_mm,
        .free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_vq_address = vdpasim_set_vq_address,
        .set_vq_num = vdpasim_set_vq_num,
        .kick_vq = vdpasim_kick_vq,
        .set_vq_cb = vdpasim_set_vq_cb,
        .set_vq_ready = vdpasim_set_vq_ready,
        .get_vq_ready = vdpasim_get_vq_ready,
        .set_vq_state = vdpasim_set_vq_state,
        .get_vendor_vq_stats = vdpasim_get_vq_stats,
        .get_vq_state = vdpasim_get_vq_state,
        .get_vq_align = vdpasim_get_vq_align,
        .get_vq_group = vdpasim_get_vq_group,
        .get_device_features = vdpasim_get_device_features,
        .get_backend_features = vdpasim_get_backend_features,
        .set_driver_features = vdpasim_set_driver_features,
        .get_driver_features = vdpasim_get_driver_features,
        .set_config_cb = vdpasim_set_config_cb,
        .get_vq_num_max = vdpasim_get_vq_num_max,
        .get_device_id = vdpasim_get_device_id,
        .get_vendor_id = vdpasim_get_vendor_id,
        .get_status = vdpasim_get_status,
        .set_status = vdpasim_set_status,
        .reset = vdpasim_reset,
        .compat_reset = vdpasim_compat_reset,
        .suspend = vdpasim_suspend,
        .resume = vdpasim_resume,
        .get_config_size = vdpasim_get_config_size,
        .get_config = vdpasim_get_config,
        .set_config = vdpasim_set_config,
        .get_generation = vdpasim_get_generation,
        .get_iova_range = vdpasim_get_iova_range,
        .set_group_asid = vdpasim_set_group_asid,
        .set_map = vdpasim_set_map,
        .reset_map = vdpasim_reset_map,
        .bind_mm = vdpasim_bind_mm,
        .unbind_mm = vdpasim_unbind_mm,
        .free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);