/*
 * VFIO device
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-device.h"
#include "hw/vfio/pci.h"
#include "hw/hw.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "monitor/monitor.h"
#include "vfio-helpers.h"

VFIODeviceList vfio_device_list =
    QLIST_HEAD_INITIALIZER(vfio_device_list);

/*
 * We want to differentiate hot reset of multiple in-use devices vs
 * hot reset of a single in-use device. VFIO_DEVICE_RESET will already
 * handle the case of doing hot resets when there is only a single
 * device per bus. The in-use here refers to how many VFIODevices are
 * affected. A hot reset that affects multiple devices, but only a
 * single in-use device, means that we can call it from our bus
 * ->reset() callback since the extent is effectively a single
 * device. This allows us to make use of it in the hotplug path. When
 * there are multiple in-use devices, we can only trigger the hot
 * reset during a system reset and thus from our reset handler. We
 * separate _one vs _multi here so that we don't overlap and do a
 * double reset on the system reset path where both our reset handler
 * and ->reset() callback are used. Calling _one() will only do a hot
 * reset for the one in-use devices case, calling _multi() will do
 * nothing if a _one() would have been sufficient.
 */
void vfio_device_reset_handler(void *opaque)
{
    VFIODevice *vbasedev;

    trace_vfio_device_reset_handler();
    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized && vbasedev->needs_reset) {
            vbasedev->ops->vfio_hot_reset_multi(vbasedev);
        }
    }
}

/*
 * Common VFIO interrupt disable
 */
void vfio_device_irq_disable(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

void vfio_device_irq_unmask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

void vfio_device_irq_mask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

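/*
 * Set up (fd >= 0) or tear down (fd < 0) eventfd signaling for interrupt
 * "index:subindex" of the device with the requested action (MASK, UNMASK
 * or TRIGGER).  On failure, an error naming the affected interrupt is
 * reported through errp.
 */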
bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
                                   int action, int fd, Error **errp)
{
    ERRP_GUARD();
    g_autofree struct vfio_irq_set *irq_set = NULL;
    int argsz;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (!vbasedev->io_ops->set_irqs(vbasedev, irq_set)) {
        return true;
    }

    error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return false;
}

int vfio_device_get_irq_info(VFIODevice *vbasedev, int index,
                             struct vfio_irq_info *info)
{
    memset(info, 0, sizeof(*info));

    info->argsz = sizeof(*info);
    info->index = index;

    return vbasedev->io_ops->get_irq_info(vbasedev, info);
}

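/*
 * Return the region info for region "index", allocating and caching it on
 * first use.  The query is retried with a larger buffer whenever the
 * returned argsz exceeds what was passed in, so any additional data (such
 * as capability chains) is captured in full.  Cached entries are owned by
 * vbasedev and freed in vfio_device_unprepare().
 */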
int vfio_device_get_region_info(VFIODevice *vbasedev, int index,
                                struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);
    int ret;

    /* check cache */
    if (vbasedev->reginfo[index] != NULL) {
        *info = vbasedev->reginfo[index];
        return 0;
    }

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    ret = vbasedev->io_ops->get_region_info(vbasedev, *info);
    if (ret != 0) {
        g_free(*info);
        *info = NULL;
        return ret;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    /* fill cache */
    vbasedev->reginfo[index] = *info;

    return 0;
}

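/*
 * Scan all regions of the device for one whose VFIO_REGION_INFO_CAP_TYPE
 * capability matches the given type/subtype.  Returns 0 with *info pointing
 * at the (cached) region info on success, or -ENODEV if no such region
 * exists.
 */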
int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type,
                                     uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_device_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_device_get_region_info_type(vbasedev->name, i,
                                               cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_device_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
    }

    return ret;
}

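/*
 * Derive vbasedev->name.  Without FD passing, the sysfs path is checked to
 * exist and the name defaults to its basename unless the user already
 * supplied one.  With FD passing (iommufd backend only), a synthetic
 * "VFIO_FD<n>" name is generated so that code printing vbasedev->name keeps
 * working.
 */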
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
{
    ERRP_GUARD();
    struct stat st;

    if (vbasedev->fd < 0) {
        if (stat(vbasedev->sysfsdev, &st) < 0) {
            error_setg_errno(errp, errno, "no such host device");
            error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
            return false;
        }
        /* The user may specify a name, e.g. for a VFIO platform device */
        if (!vbasedev->name) {
            vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
        }
    } else {
        if (!vbasedev->iommufd) {
            error_setg(errp, "Use FD passing only with iommufd backend");
            return false;
        }
        /*
         * Give the device a name based on the fd so that any function
         * printing out vbasedev->name will not break.
         */
        if (!vbasedev->name) {
            vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
        }
    }

    return true;
}

void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
{
    ERRP_GUARD();
    int fd = monitor_fd_param(monitor_cur(), str, errp);

    if (fd < 0) {
        error_prepend(errp, "Could not parse remote object fd %s:", str);
        return;
    }
    vbasedev->fd = fd;
}

static VFIODeviceIOOps vfio_device_io_ops_ioctl;

void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                      DeviceState *dev, bool ram_discard)
{
    vbasedev->type = type;
    vbasedev->ops = ops;
    vbasedev->io_ops = &vfio_device_io_ops_ioctl;
    vbasedev->dev = dev;
    vbasedev->fd = -1;

    vbasedev->ram_block_discard_allowed = ram_discard;
}

int vfio_device_get_aw_bits(VFIODevice *vdev)
{
    /*
     * iova_ranges is a sorted list.  For old kernels that support VFIO but
     * do not support querying of IOVA ranges, iova_ranges is NULL; in that
     * case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX (64) is returned.
     */
    GList *l = g_list_last(vdev->bcontainer->iova_ranges);

    if (l) {
        Range *range = l->data;
        return range_get_last_bit(range) + 1;
    }

    return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
}

bool vfio_device_is_mdev(VFIODevice *vbasedev)
{
    g_autofree char *subsys = NULL;
    g_autofree char *tmp = NULL;

    if (!vbasedev->sysfsdev) {
        return false;
    }

    tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
    subsys = realpath(tmp, NULL);
    return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
}

bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
                                         const char *typename, Error **errp)
{
    HostIOMMUDevice *hiod;

    if (vbasedev->mdev) {
        return true;
    }

    hiod = HOST_IOMMU_DEVICE(object_new(typename));

    if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) {
        object_unref(hiod);
        return false;
    }

    vbasedev->hiod = hiod;
    return true;
}

VFIODevice *vfio_get_vfio_device(Object *obj)
{
    if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) {
        return &VFIO_PCI_BASE(obj)->vbasedev;
    } else {
        return NULL;
    }
}

bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name,
                                      VFIODevice *vbasedev, AddressSpace *as,
                                      Error **errp)
{
    const VFIOIOMMUClass *ops =
        VFIO_IOMMU_CLASS(object_class_by_name(iommu_type));

    assert(ops);

    return ops->attach_device(name, vbasedev, as, errp);
}

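/*
 * Attach the device to an address space, selecting the IOMMU backend from
 * the device configuration: the iommufd backend when an iommufd object was
 * given, the legacy VFIO container backend otherwise.
 */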
bool vfio_device_attach(char *name, VFIODevice *vbasedev,
                        AddressSpace *as, Error **errp)
{
    const char *iommu_type = vbasedev->iommufd ?
                             TYPE_VFIO_IOMMU_IOMMUFD :
                             TYPE_VFIO_IOMMU_LEGACY;

    return vfio_device_attach_by_iommu_type(iommu_type, name, vbasedev,
                                            as, errp);
}

void vfio_device_detach(VFIODevice *vbasedev)
{
    if (!vbasedev->bcontainer) {
        return;
    }
    VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev);
}

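/*
 * Populate the VFIODevice from the device info returned by the backend,
 * link it into the container and global device lists, and allocate the
 * per-region info cache.  Undone by vfio_device_unprepare().
 */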
void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
                         struct vfio_device_info *info)
{
    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;
    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);

    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    vbasedev->reginfo = g_new0(struct vfio_region_info *,
                               vbasedev->num_regions);
}

void vfio_device_unprepare(VFIODevice *vbasedev)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        g_free(vbasedev->reginfo[i]);
    }
    g_free(vbasedev->reginfo);
    vbasedev->reginfo = NULL;

    QLIST_REMOVE(vbasedev, container_next);
    QLIST_REMOVE(vbasedev, global_next);
    vbasedev->bcontainer = NULL;
}

/*
 * Traditional ioctl() based io
 */

static int vfio_device_io_device_feature(VFIODevice *vbasedev,
                                         struct vfio_device_feature *feature)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_get_region_info(VFIODevice *vbasedev,
                                          struct vfio_region_info *info)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, info);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_get_irq_info(VFIODevice *vbasedev,
                                       struct vfio_irq_info *info)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, info);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_set_irqs(VFIODevice *vbasedev,
                                   struct vfio_irq_set *irqs)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irqs);

    return ret < 0 ? -errno : ret;
}

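/*
 * Region accessors for the ioctl backend: look up the region's offset
 * within the device fd (via the cached region info) and pread()/pwrite()
 * at the requested offset into the region.
 */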
static int vfio_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
                                      off_t off, uint32_t size, void *data)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret != 0) {
        return ret;
    }

    ret = pread(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
                                       off_t off, uint32_t size, void *data)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret != 0) {
        return ret;
    }

    ret = pwrite(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}

static VFIODeviceIOOps vfio_device_io_ops_ioctl = {
    .device_feature = vfio_device_io_device_feature,
    .get_region_info = vfio_device_io_get_region_info,
    .get_irq_info = vfio_device_io_get_irq_info,
    .set_irqs = vfio_device_io_set_irqs,
    .region_read = vfio_device_io_region_read,
    .region_write = vfio_device_io_region_write,
};