1 /*
2 * vfio based subchannel assignment support
3 *
4 * Copyright 2017 IBM Corp.
5 * Copyright 2019 Red Hat, Inc.
6 *
7 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8 * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9 * Pierre Morel <pmorel@linux.vnet.ibm.com>
10 * Cornelia Huck <cohuck@redhat.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or (at
13 * your option) any later version. See the COPYING file in the top-level
14 * directory.
15 */
16
17 #include "qemu/osdep.h"
18 #include CONFIG_DEVICES /* CONFIG_IOMMUFD */
19 #include <linux/vfio.h>
20 #include <linux/vfio_ccw.h>
21 #include <sys/ioctl.h>
22
23 #include "qapi/error.h"
24 #include "hw/vfio/vfio-device.h"
25 #include "system/iommufd.h"
26 #include "hw/s390x/s390-ccw.h"
27 #include "hw/s390x/vfio-ccw.h"
28 #include "hw/qdev-properties.h"
29 #include "hw/s390x/ccw-device.h"
30 #include "system/address-spaces.h"
31 #include "qemu/error-report.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/module.h"
34
/* Per-device state for a vfio-ccw (mediated subchannel) device. */
struct VFIOCCWDevice {
    S390CCWDevice cdev;     /* base s390 CCW device state */
    VFIODevice vdev;        /* common VFIO device state (fd, regions, irqs) */
    /* Mandatory I/O region: ORB/SCSW written in, IRB read back. */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional async command region (CSCH/HSCH); NULL if absent. */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    /* Optional SCHIB region used by STORE SUBCHANNEL; NULL if absent. */
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    /* Optional CRW region; NULL if absent. */
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    EventNotifier io_notifier;   /* I/O interrupt eventfd */
    EventNotifier crw_notifier;  /* channel report word eventfd */
    EventNotifier req_notifier;  /* host device-release request eventfd */
    bool force_orb_pfch;         /* force the ORB prefetch bit (property) */
};
55
/* vfio-ccw devices never flag a deferred reset requirement. */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
60
61 /*
62 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
63 * vfio_ccw device now.
64 */
65 struct VFIODeviceOps vfio_ccw_ops = {
66 .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
67 };
68
/*
 * Forward a start request to the kernel vfio-ccw driver: copy the guest
 * ORB and SCSW into the I/O region, write the region to the device fd,
 * and translate the result into an I/O-instruction condition code.
 */
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    /* Force the prefetch bit when the force-orb-pfch property is set. */
    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_report_once("vfio-ccw (devno %x.%x.%04x): PFCH flag forced",
                         sch->cssid, sch->ssid, sch->devno);
    }

    /* The region areas must match the emulated structures exactly. */
    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    /* Retry the region write for as long as the driver reports EAGAIN. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    /* Map the write outcome onto a condition code for the guest. */
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        /* Unclassified failure: raise a unit exception to the guest. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}
117
/*
 * Implement STORE SUBCHANNEL support: refresh the path-related bits of
 * the emulated SCHIB from the optional schib region provided by the
 * kernel driver.  Always reports cc 0 (IOINST_CC_EXPECTED).
 */
static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    /* NOTE(review): a short-but-successful read is treated as success. */
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    /* Propagate the path-not-operational flag when the host set it. */
    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}
161
/*
 * Forward CLEAR SUBCHANNEL to the kernel driver via the optional async
 * command region.  Returns -ENOSYS when the region is absent so the
 * caller can fall back to emulation; otherwise 0 or a negative errno.
 */
static int vfio_ccw_handle_clear(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_CSCH;

again:
    /* Retry the region write for as long as the driver reports EAGAIN. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        /* Unclassified failure: raise a unit exception to the guest. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
200
/*
 * Forward HALT SUBCHANNEL to the kernel driver via the optional async
 * command region.  Returns -ENOSYS when the region is absent so the
 * caller can fall back to emulation; otherwise 0 or a negative errno.
 */
static int vfio_ccw_handle_halt(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int rc;

    if (!region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_HSCH;

    /* Hand the halt command to the driver, retrying on EAGAIN. */
    do {
        rc = pwrite(vcdev->vdev.fd, region,
                    vcdev->async_cmd_region_size,
                    vcdev->async_cmd_region_offset);
    } while (rc != vcdev->async_cmd_region_size && errno == EAGAIN);

    if (rc != vcdev->async_cmd_region_size) {
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        rc = errno ? -errno : -EFAULT;
    } else {
        rc = 0;
    }

    switch (rc) {
    case 0:
    case -EBUSY:
    case -ENODEV:
    case -EACCES:
        return rc;
    case -EFAULT:
    default:
        /* Unclassified failure: raise a unit exception to the guest. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
240
/* Legacy reset handler: ask the kernel driver to reset the device. */
static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);

    /* NOTE(review): return value ignored — presumably best-effort; confirm. */
    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}
247
/*
 * Drain all pending channel report words (CRWs) from the CRW region and
 * queue them for delivery to the guest.  Stops when the region returns a
 * zero CRW or the read fails.
 */
static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
{
    struct ccw_crw_region *region = vcdev->crw_region;
    CRW crw;
    int size;

    /* Keep reading CRWs as long as data is returned */
    do {
        memset(region, 0, sizeof(*region));
        size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
                     vcdev->crw_region_offset);

        if (size == -1) {
            error_report("vfio-ccw: Read crw region failed with errno=%d",
                         errno);
            break;
        }

        if (region->crw == 0) {
            /* No more CRWs to queue */
            break;
        }

        /* Fix: restore "&region->crw" (was mojibake-corrupted to "®ion"). */
        memcpy(&crw, &region->crw, sizeof(CRW));

        css_crw_add_to_queue(crw);
    } while (1);
}
276
vfio_ccw_req_notifier_handler(void * opaque)277 static void vfio_ccw_req_notifier_handler(void *opaque)
278 {
279 VFIOCCWDevice *vcdev = opaque;
280 Error *err = NULL;
281
282 if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
283 return;
284 }
285
286 qdev_unplug(DEVICE(vcdev), &err);
287 if (err) {
288 warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
289 }
290 }
291
vfio_ccw_crw_notifier_handler(void * opaque)292 static void vfio_ccw_crw_notifier_handler(void *opaque)
293 {
294 VFIOCCWDevice *vcdev = opaque;
295
296 while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
297 vfio_ccw_crw_read(vcdev);
298 }
299 }
300
/*
 * Handler for the I/O IRQ: read the IRB back from the I/O region, fold
 * it into the emulated subchannel status, and inject an I/O interrupt.
 * Read failures are converted into the architecturally closest channel
 * condition before the interrupt is injected.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    /* Ignore spurious wakeups where the notifier did not actually fire. */
    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                   SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
373
vfio_ccw_register_irq_notifier(VFIOCCWDevice * vcdev,unsigned int irq,Error ** errp)374 static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
375 unsigned int irq,
376 Error **errp)
377 {
378 VFIODevice *vdev = &vcdev->vdev;
379 struct vfio_irq_info irq_info;
380 int ret;
381 int fd;
382 EventNotifier *notifier;
383 IOHandler *fd_read;
384
385 switch (irq) {
386 case VFIO_CCW_IO_IRQ_INDEX:
387 notifier = &vcdev->io_notifier;
388 fd_read = vfio_ccw_io_notifier_handler;
389 break;
390 case VFIO_CCW_CRW_IRQ_INDEX:
391 notifier = &vcdev->crw_notifier;
392 fd_read = vfio_ccw_crw_notifier_handler;
393 break;
394 case VFIO_CCW_REQ_IRQ_INDEX:
395 notifier = &vcdev->req_notifier;
396 fd_read = vfio_ccw_req_notifier_handler;
397 break;
398 default:
399 error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
400 return false;
401 }
402
403 if (vdev->num_irqs < irq + 1) {
404 error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
405 irq, vdev->num_irqs);
406 return false;
407 }
408
409 ret = vfio_device_get_irq_info(vdev, irq, &irq_info);
410
411 if (ret < 0) {
412 error_setg_errno(errp, -ret, "vfio: Error getting irq info");
413 return false;
414 }
415
416 if (irq_info.count < 1) {
417 error_setg(errp, "vfio: Error getting irq info, count=0");
418 return false;
419 }
420
421 if (event_notifier_init(notifier, 0)) {
422 error_setg_errno(errp, errno,
423 "vfio: Unable to init event notifier for irq (%d)",
424 irq);
425 return false;
426 }
427
428 fd = event_notifier_get_fd(notifier);
429 qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
430
431 if (!vfio_device_irq_set_signaling(vdev, irq, 0,
432 VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
433 qemu_set_fd_handler(fd, NULL, NULL, vcdev);
434 event_notifier_cleanup(notifier);
435 }
436
437 return true;
438 }
439
/*
 * Tear down the eventfd signaling, main-loop handler and notifier for
 * the given vfio-ccw IRQ index.  Failure to disable signaling is only
 * warned about; teardown proceeds regardless.
 */
static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    EventNotifier *notifier;
    Error *local_err = NULL;

    /* Pick the notifier that belongs to this IRQ index. */
    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (!vfio_device_irq_set_signaling(&vcdev->vdev, irq, 0,
                                       VFIO_IRQ_SET_ACTION_TRIGGER,
                                       -1, &local_err)) {
        warn_reportf_err(local_err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier), NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}
470
vfio_ccw_get_region(VFIOCCWDevice * vcdev,Error ** errp)471 static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
472 {
473 VFIODevice *vdev = &vcdev->vdev;
474 struct vfio_region_info *info;
475 int ret;
476
477 /* Sanity check device */
478 if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
479 error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
480 return false;
481 }
482
483 /*
484 * We always expect at least the I/O region to be present. We also
485 * may have a variable number of regions governed by capabilities.
486 */
487 if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
488 error_setg(errp, "vfio: too few regions (%u), expected at least %u",
489 vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
490 return false;
491 }
492
493 ret = vfio_device_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
494 if (ret) {
495 error_setg_errno(errp, -ret, "vfio: Error getting config info");
496 return false;
497 }
498
499 vcdev->io_region_size = info->size;
500 if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
501 error_setg(errp, "vfio: Unexpected size of the I/O region");
502 goto out_err;
503 }
504
505 vcdev->io_region_offset = info->offset;
506 vcdev->io_region = g_malloc0(info->size);
507
508 /* check for the optional async command region */
509 ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW,
510 VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
511 if (!ret) {
512 vcdev->async_cmd_region_size = info->size;
513 if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
514 error_setg(errp, "vfio: Unexpected size of the async cmd region");
515 goto out_err;
516 }
517 vcdev->async_cmd_region_offset = info->offset;
518 vcdev->async_cmd_region = g_malloc0(info->size);
519 }
520
521 ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW,
522 VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
523 if (!ret) {
524 vcdev->schib_region_size = info->size;
525 if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
526 error_setg(errp, "vfio: Unexpected size of the schib region");
527 goto out_err;
528 }
529 vcdev->schib_region_offset = info->offset;
530 vcdev->schib_region = g_malloc(info->size);
531 }
532
533 ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW,
534 VFIO_REGION_SUBTYPE_CCW_CRW, &info);
535
536 if (!ret) {
537 vcdev->crw_region_size = info->size;
538 if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
539 error_setg(errp, "vfio: Unexpected size of the CRW region");
540 goto out_err;
541 }
542 vcdev->crw_region_offset = info->offset;
543 vcdev->crw_region = g_malloc(info->size);
544 }
545
546 return true;
547
548 out_err:
549 g_free(vcdev->crw_region);
550 g_free(vcdev->schib_region);
551 g_free(vcdev->async_cmd_region);
552 g_free(vcdev->io_region);
553 return false;
554 }
555
/*
 * Free all region buffers.  Pointers are cleared so the function is
 * idempotent and stale references cannot be dereferenced or
 * double-freed later.
 */
static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
{
    g_clear_pointer(&vcdev->crw_region, g_free);
    g_clear_pointer(&vcdev->schib_region, g_free);
    g_clear_pointer(&vcdev->async_cmd_region, g_free);
    g_clear_pointer(&vcdev->io_region, g_free);
}
563
/*
 * Realize the vfio-ccw device: set up the subchannel, attach the VFIO
 * device, map its regions, and register the IRQ notifiers.  On any
 * fatal error, everything set up so far is unwound in reverse order.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        if (!cdc->realize(cdev, vcdev->vdev.sysfsdev, errp)) {
            return;
        }
    }

    if (!vfio_device_get_name(vbasedev, errp)) {
        goto out_unrealize;
    }

    if (!vfio_device_attach(cdev->mdevid, vbasedev,
                            &address_space_memory, errp)) {
        goto out_attach_dev_err;
    }

    if (!vfio_ccw_get_region(vcdev, errp)) {
        goto out_region_err;
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, errp)) {
        goto out_io_notifier_err;
    }

    /* The CRW IRQ is only meaningful when the CRW region exists. */
    if (vcdev->crw_region) {
        if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX,
                                            errp)) {
            goto out_irq_notifier_err;
        }
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err)) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        warn_report_err(err);
    }

    return;

out_irq_notifier_err:
    /*
     * NOTE(review): REQ (and possibly CRW) were never registered on this
     * path — presumably unregistering them is a tolerated no-op; confirm.
     */
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_device_detach(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
out_unrealize:
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
628
/*
 * Unrealize the device: tear down IRQ notifiers, region buffers and the
 * VFIO attachment in reverse order of realize, then unrealize the
 * subchannel.
 */
static void vfio_ccw_unrealize(DeviceState *dev)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_device_detach(&vcdev->vdev);
    g_free(vcdev->vdev.name);

    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
646
/* QOM properties of the vfio-ccw device. */
static const Property vfio_ccw_properties[] = {
    /* Host sysfs path of the mdev to assign. */
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    /* Force the ORB prefetch bit on all start requests. */
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
    DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm),
};
656
/* vfio-ccw devices carry host-side state and are not migratable. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
661
/* Instance init: set up common VFIO device state for this mdev. */
static void vfio_ccw_instance_init(Object *obj)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(obj);
    VFIODevice *vbasedev = &vcdev->vdev;

    /* CCW device is mdev type device */
    vbasedev->mdev = true;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, ie. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
                     DEVICE(vcdev), true);
}
681
682 #ifdef CONFIG_IOMMUFD
vfio_ccw_set_fd(Object * obj,const char * str,Error ** errp)683 static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
684 {
685 vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
686 }
687 #endif
688
/* Class init: install properties, lifecycle hooks and CCW callbacks. */
static void vfio_ccw_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_ccw_properties);
#ifdef CONFIG_IOMMUFD
    object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
#endif
    dc->vmsd = &vfio_ccw_vmstate;
    dc->desc = "VFIO-based subchannel assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    device_class_set_legacy_reset(dc, vfio_ccw_reset);

    /* Subchannel instruction handlers backed by the kernel driver. */
    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
    cdc->handle_store = vfio_ccw_handle_store;

    /* Property descriptions; comments note the QEMU release they appeared in. */
    object_class_property_set_description(klass, /* 2.10 */
                                          "sysfsdev",
                                          "Host sysfs path of assigned device");
    object_class_property_set_description(klass, /* 3.0 */
                                          "force-orb-pfch",
                                          "Force unlimited prefetch");
#ifdef CONFIG_IOMMUFD
    object_class_property_set_description(klass, /* 9.0 */
                                          "iommufd",
                                          "Set host IOMMUFD backend device");
#endif
    object_class_property_set_description(klass, /* 9.2 */
                                          "loadparm",
                                          "Define which devices that can be used for booting");
}
725
/* QOM type registration record for the vfio-ccw device. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .instance_init = vfio_ccw_instance_init,
    .class_init = vfio_ccw_class_init,
};
733
/* Register the vfio-ccw type with the QOM type system at startup. */
static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}
738
739 type_init(register_vfio_ccw_type)
740