1 /*
2 * VFIO device
3 *
4 * Copyright Red Hat, Inc. 2012
5 *
6 * Authors:
7 * Alex Williamson <alex.williamson@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Based on qemu-kvm device-assignment:
13 * Adapted for KVM by Qumranet.
14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19 */
20
21 #include "qemu/osdep.h"
22 #include <sys/ioctl.h>
23
24 #include "hw/vfio/vfio-device.h"
25 #include "hw/vfio/pci.h"
26 #include "hw/hw.h"
27 #include "trace.h"
28 #include "qapi/error.h"
29 #include "qemu/error-report.h"
30 #include "qemu/units.h"
31 #include "monitor/monitor.h"
32 #include "vfio-helpers.h"
33
/* Global list of every realized VFIO device, linked via global_next. */
VFIODeviceList vfio_device_list =
    QLIST_HEAD_INITIALIZER(vfio_device_list);
36
37 /*
38 * We want to differentiate hot reset of multiple in-use devices vs
39 * hot reset of a single in-use device. VFIO_DEVICE_RESET will already
40 * handle the case of doing hot resets when there is only a single
41 * device per bus. The in-use here refers to how many VFIODevices are
42 * affected. A hot reset that affects multiple devices, but only a
43 * single in-use device, means that we can call it from our bus
44 * ->reset() callback since the extent is effectively a single
45 * device. This allows us to make use of it in the hotplug path. When
46 * there are multiple in-use devices, we can only trigger the hot
47 * reset during a system reset and thus from our reset handler. We
48 * separate _one vs _multi here so that we don't overlap and do a
49 * double reset on the system reset path where both our reset handler
50 * and ->reset() callback are used. Calling _one() will only do a hot
51 * reset for the one in-use devices case, calling _multi() will do
52 * nothing if a _one() would have been sufficient.
53 */
vfio_device_reset_handler(void * opaque)54 void vfio_device_reset_handler(void *opaque)
55 {
56 VFIODevice *vbasedev;
57
58 trace_vfio_device_reset_handler();
59 QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
60 if (vbasedev->dev->realized) {
61 vbasedev->ops->vfio_compute_needs_reset(vbasedev);
62 }
63 }
64
65 QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
66 if (vbasedev->dev->realized && vbasedev->needs_reset) {
67 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
68 }
69 }
70 }
71
72 /*
73 * Common VFIO interrupt disable
74 */
vfio_device_irq_disable(VFIODevice * vbasedev,int index)75 void vfio_device_irq_disable(VFIODevice *vbasedev, int index)
76 {
77 struct vfio_irq_set irq_set = {
78 .argsz = sizeof(irq_set),
79 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
80 .index = index,
81 .start = 0,
82 .count = 0,
83 };
84
85 vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
86 }
87
/* Unmask (re-enable delivery of) the first interrupt of the given index. */
void vfio_device_irq_unmask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set;

    irq_set.argsz = sizeof(irq_set);
    irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set.index = index;
    irq_set.start = 0;
    irq_set.count = 1;

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}
100
/* Mask (inhibit delivery of) the first interrupt of the given index. */
void vfio_device_irq_mask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set;

    irq_set.argsz = sizeof(irq_set);
    irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
    irq_set.index = index;
    irq_set.start = 0;
    irq_set.count = 1;

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}
113
/* Human-readable name of a VFIO_IRQ_SET_ACTION_* flag, for error messages. */
static inline const char *action_to_str(int action)
{
    if (action == VFIO_IRQ_SET_ACTION_MASK) {
        return "MASK";
    }
    if (action == VFIO_IRQ_SET_ACTION_UNMASK) {
        return "UNMASK";
    }
    if (action == VFIO_IRQ_SET_ACTION_TRIGGER) {
        return "TRIGGER";
    }
    return "UNKNOWN ACTION";
}
127
/*
 * Human-readable name of a PCI IRQ index, or NULL for non-PCI devices
 * and indices without a well-known name.
 */
static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    /* Only the PCI IRQ indices have symbolic names. */
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    if (index == VFIO_PCI_INTX_IRQ_INDEX) {
        return "INTX";
    }
    if (index == VFIO_PCI_MSI_IRQ_INDEX) {
        return "MSI";
    }
    if (index == VFIO_PCI_MSIX_IRQ_INDEX) {
        return "MSIX";
    }
    if (index == VFIO_PCI_ERR_IRQ_INDEX) {
        return "ERR";
    }
    if (index == VFIO_PCI_REQ_IRQ_INDEX) {
        return "REQ";
    }
    return NULL;
}
149
vfio_device_irq_set_signaling(VFIODevice * vbasedev,int index,int subindex,int action,int fd,Error ** errp)150 bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
151 int action, int fd, Error **errp)
152 {
153 ERRP_GUARD();
154 g_autofree struct vfio_irq_set *irq_set = NULL;
155 int argsz;
156 const char *name;
157 int32_t *pfd;
158
159 argsz = sizeof(*irq_set) + sizeof(*pfd);
160
161 irq_set = g_malloc0(argsz);
162 irq_set->argsz = argsz;
163 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
164 irq_set->index = index;
165 irq_set->start = subindex;
166 irq_set->count = 1;
167 pfd = (int32_t *)&irq_set->data;
168 *pfd = fd;
169
170 if (!vbasedev->io_ops->set_irqs(vbasedev, irq_set)) {
171 return true;
172 }
173
174 error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");
175
176 name = index_to_str(vbasedev, index);
177 if (name) {
178 error_prepend(errp, "%s-%d: ", name, subindex);
179 } else {
180 error_prepend(errp, "index %d-%d: ", index, subindex);
181 }
182 error_prepend(errp,
183 "Failed to %s %s eventfd signaling for interrupt ",
184 fd < 0 ? "tear down" : "set up", action_to_str(action));
185 return false;
186 }
187
/*
 * Query IRQ information for the given index via the device's io_ops.
 * Returns 0 on success or a negative errno value.
 */
int vfio_device_get_irq_info(VFIODevice *vbasedev, int index,
                             struct vfio_irq_info *info)
{
    /* Compound-literal assignment zeroes every field we don't name. */
    *info = (struct vfio_irq_info) {
        .argsz = sizeof(*info),
        .index = index,
    };

    return vbasedev->io_ops->get_irq_info(vbasedev, info);
}
198
/*
 * Fetch (and cache) region info for the given region index.  On success
 * *info points at the cached struct (owned by vbasedev, do not free) and
 * 0 is returned; on failure *info is NULL and a negative errno returned.
 */
int vfio_device_get_region_info(VFIODevice *vbasedev, int index,
                                struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);
    int fd = -1;
    int ret;

    /* check cache */
    if (vbasedev->reginfo[index] != NULL) {
        *info = vbasedev->reginfo[index];
        return 0;
    }

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    ret = vbasedev->io_ops->get_region_info(vbasedev, *info, &fd);
    if (ret != 0) {
        g_free(*info);
        *info = NULL;
        return ret;
    }

    if ((*info)->argsz > argsz) {
        /* Kernel wants a bigger buffer (e.g. for the capability chain). */
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        /* Drop any fd from the undersized attempt before retrying. */
        if (fd != -1) {
            close(fd);
            fd = -1;
        }

        goto retry;
    }

    /* fill cache */
    vbasedev->reginfo[index] = *info;
    if (vbasedev->region_fds != NULL) {
        vbasedev->region_fds[index] = fd;
    } else if (fd != -1) {
        /* No table to stash it in: close the fd instead of leaking it. */
        close(fd);
    }

    return 0;
}
245
/*
 * Scan all regions for one whose type capability matches type/subtype.
 * On success *info points at the (cached) region info and 0 is returned;
 * otherwise *info is NULL and -ENODEV is returned.
 */
int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type,
                                     uint32_t subtype, struct vfio_region_info **info)
{
    int idx;

    for (idx = 0; idx < vbasedev->num_regions; idx++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap;

        /* Skip regions whose info we cannot obtain. */
        if (vfio_device_get_region_info(vbasedev, idx, info)) {
            continue;
        }

        /* Skip regions without a type capability. */
        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (hdr == NULL) {
            continue;
        }

        cap = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_device_get_region_info_type(vbasedev->name, idx,
                                               cap->type, cap->subtype);

        if (cap->type == type && cap->subtype == subtype) {
            return 0;
        }
    }

    *info = NULL;
    return -ENODEV;
}
277
vfio_device_has_region_cap(VFIODevice * vbasedev,int region,uint16_t cap_type)278 bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
279 {
280 struct vfio_region_info *info = NULL;
281 bool ret = false;
282
283 if (!vfio_device_get_region_info(vbasedev, region, &info)) {
284 if (vfio_get_region_info_cap(info, cap_type)) {
285 ret = true;
286 }
287 }
288
289 return ret;
290 }
291
vfio_device_get_name(VFIODevice * vbasedev,Error ** errp)292 bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
293 {
294 ERRP_GUARD();
295 struct stat st;
296
297 if (vbasedev->fd < 0) {
298 if (stat(vbasedev->sysfsdev, &st) < 0) {
299 error_setg_errno(errp, errno, "no such host device");
300 error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
301 return false;
302 }
303 /* User may specify a name, e.g: VFIO platform device */
304 if (!vbasedev->name) {
305 vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
306 }
307 } else {
308 if (!vbasedev->iommufd) {
309 error_setg(errp, "Use FD passing only with iommufd backend");
310 return false;
311 }
312 /*
313 * Give a name with fd so any function printing out vbasedev->name
314 * will not break.
315 */
316 if (!vbasedev->name) {
317 vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
318 }
319 }
320
321 return true;
322 }
323
vfio_device_set_fd(VFIODevice * vbasedev,const char * str,Error ** errp)324 void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
325 {
326 ERRP_GUARD();
327 int fd = monitor_fd_param(monitor_cur(), str, errp);
328
329 if (fd < 0) {
330 error_prepend(errp, "Could not parse remote object fd %s:", str);
331 return;
332 }
333 vbasedev->fd = fd;
334 }
335
336 static VFIODeviceIOOps vfio_device_io_ops_ioctl;
337
/* Initialize the common VFIODevice state shared by all device types. */
void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                      DeviceState *dev, bool ram_discard)
{
    vbasedev->dev = dev;
    vbasedev->type = type;
    vbasedev->ops = ops;
    /* Default to the traditional kernel-ioctl I/O backend. */
    vbasedev->io_ops = &vfio_device_io_ops_ioctl;
    /* No fd has been passed in yet. */
    vbasedev->fd = -1;
    vbasedev->use_region_fds = false;
    vbasedev->ram_block_discard_allowed = ram_discard;
}
350
/*
 * Address-width (in bits) usable for IOVA allocation on this device's
 * container, derived from the top of the highest reported IOVA range.
 */
int vfio_device_get_aw_bits(VFIODevice *vdev)
{
    /*
     * iova_ranges is a sorted list.  Old kernels that support VFIO but
     * not IOVA-range queries leave it NULL; fall back to the maximum,
     * HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX (64).
     */
    GList *last = g_list_last(vdev->bcontainer->iova_ranges);
    Range *range;

    if (!last) {
        return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
    }

    range = last->data;
    return range_get_last_bit(range) + 1;
}
367
vfio_device_is_mdev(VFIODevice * vbasedev)368 bool vfio_device_is_mdev(VFIODevice *vbasedev)
369 {
370 g_autofree char *subsys = NULL;
371 g_autofree char *tmp = NULL;
372
373 if (!vbasedev->sysfsdev) {
374 return false;
375 }
376
377 tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
378 subsys = realpath(tmp, NULL);
379 return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
380 }
381
vfio_device_hiod_create_and_realize(VFIODevice * vbasedev,const char * typename,Error ** errp)382 bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
383 const char *typename, Error **errp)
384 {
385 HostIOMMUDevice *hiod;
386
387 if (vbasedev->mdev) {
388 return true;
389 }
390
391 hiod = HOST_IOMMU_DEVICE(object_new(typename));
392
393 if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) {
394 object_unref(hiod);
395 return false;
396 }
397
398 vbasedev->hiod = hiod;
399 return true;
400 }
401
vfio_get_vfio_device(Object * obj)402 VFIODevice *vfio_get_vfio_device(Object *obj)
403 {
404 if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) {
405 return &VFIO_PCI_BASE(obj)->vbasedev;
406 } else {
407 return NULL;
408 }
409 }
410
vfio_device_attach_by_iommu_type(const char * iommu_type,char * name,VFIODevice * vbasedev,AddressSpace * as,Error ** errp)411 bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name,
412 VFIODevice *vbasedev, AddressSpace *as,
413 Error **errp)
414 {
415 const VFIOIOMMUClass *ops =
416 VFIO_IOMMU_CLASS(object_class_by_name(iommu_type));
417
418 assert(ops);
419
420 return ops->attach_device(name, vbasedev, as, errp);
421 }
422
vfio_device_attach(char * name,VFIODevice * vbasedev,AddressSpace * as,Error ** errp)423 bool vfio_device_attach(char *name, VFIODevice *vbasedev,
424 AddressSpace *as, Error **errp)
425 {
426 const char *iommu_type = vbasedev->iommufd ?
427 TYPE_VFIO_IOMMU_IOMMUFD :
428 TYPE_VFIO_IOMMU_LEGACY;
429
430 return vfio_device_attach_by_iommu_type(iommu_type, name, vbasedev,
431 as, errp);
432 }
433
/* Detach the device from its container; a no-op if it isn't attached. */
void vfio_device_detach(VFIODevice *vbasedev)
{
    VFIOContainerBase *bcontainer = vbasedev->bcontainer;

    if (bcontainer) {
        VFIO_IOMMU_GET_CLASS(bcontainer)->detach_device(vbasedev);
    }
}
441
/*
 * Populate common VFIODevice state from the kernel's device info, link
 * the device into the container and global lists, and allocate the
 * per-region caches.
 */
void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
                         struct vfio_device_info *info)
{
    int i;

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;
    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);

    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    vbasedev->reginfo = g_new0(struct vfio_region_info *,
                               vbasedev->num_regions);
    if (vbasedev->use_region_fds) {
        vbasedev->region_fds = g_new0(int, vbasedev->num_regions);
        /*
         * Mark every slot as "no fd": g_new0() leaves entries at 0, which
         * is a valid file descriptor, and vfio_device_unprepare() closes
         * anything != -1.  Regions that are never queried must not cause
         * a stray close(0).
         */
        for (i = 0; i < vbasedev->num_regions; i++) {
            vbasedev->region_fds[i] = -1;
        }
    }
}
461
/*
 * Undo vfio_device_prepare(): release the per-region caches, close any
 * region fds we were handed, and unlink from the container/global lists.
 */
void vfio_device_unprepare(VFIODevice *vbasedev)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        g_free(vbasedev->reginfo[i]);
        if (vbasedev->region_fds && vbasedev->region_fds[i] != -1) {
            close(vbasedev->region_fds[i]);
        }
    }

    g_clear_pointer(&vbasedev->reginfo, g_free);
    g_clear_pointer(&vbasedev->region_fds, g_free);

    QLIST_REMOVE(vbasedev, container_next);
    QLIST_REMOVE(vbasedev, global_next);
    vbasedev->bcontainer = NULL;
}
481
482 /*
483 * Traditional ioctl() based io
484 */
485
/* ioctl backend: VFIO_DEVICE_FEATURE.  Returns >= 0 or -errno. */
static int vfio_device_io_device_feature(VFIODevice *vbasedev,
                                         struct vfio_device_feature *feature)
{
    int ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);

    return ret < 0 ? -errno : ret;
}
495
/*
 * ioctl backend: VFIO_DEVICE_GET_REGION_INFO.  The kernel path never
 * hands back a separate region fd, so *fd is always -1.
 */
static int vfio_device_io_get_region_info(VFIODevice *vbasedev,
                                          struct vfio_region_info *info,
                                          int *fd)
{
    int ret;

    *fd = -1;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, info);

    return ret < 0 ? -errno : ret;
}
508
/* ioctl backend: VFIO_DEVICE_GET_IRQ_INFO.  Returns >= 0 or -errno. */
static int vfio_device_io_get_irq_info(VFIODevice *vbasedev,
                                       struct vfio_irq_info *info)
{
    int ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, info);

    return ret < 0 ? -errno : ret;
}
518
/* ioctl backend: VFIO_DEVICE_SET_IRQS.  Returns >= 0 or -errno. */
static int vfio_device_io_set_irqs(VFIODevice *vbasedev,
                                   struct vfio_irq_set *irqs)
{
    int ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irqs);

    return ret < 0 ? -errno : ret;
}
528
/*
 * ioctl backend: read @size bytes at @off within region @index via
 * pread() on the device fd.  Returns bytes read or -errno.
 */
static int vfio_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
                                      off_t off, uint32_t size, void *data)
{
    struct vfio_region_info *info;
    int ret = vfio_device_get_region_info(vbasedev, index, &info);

    if (ret != 0) {
        return ret;
    }

    ret = pread(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}
544
/*
 * ioctl backend: write @size bytes at @off within region @index via
 * pwrite() on the device fd.  @post (posted-write hint) is ignored;
 * the ioctl path has no notion of posted writes.  Returns bytes
 * written or -errno.
 */
static int vfio_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
                                       off_t off, uint32_t size, void *data,
                                       bool post)
{
    struct vfio_region_info *info;
    int ret = vfio_device_get_region_info(vbasedev, index, &info);

    if (ret != 0) {
        return ret;
    }

    ret = pwrite(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}
561
/* Default VFIODeviceIOOps backend: plain ioctl()/pread()/pwrite() on fd. */
static VFIODeviceIOOps vfio_device_io_ops_ioctl = {
    .device_feature = vfio_device_io_device_feature,
    .get_region_info = vfio_device_io_get_region_info,
    .get_irq_info = vfio_device_io_get_irq_info,
    .set_irqs = vfio_device_io_set_irqs,
    .region_read = vfio_device_io_region_read,
    .region_write = vfio_device_io_region_write,
};
570