/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "standard-headers/drm/drm_fourcc.h"

typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
    VHOST_USER_GPU_GET_EDID,
    VHOST_USER_GPU_DMABUF_SCANOUT2,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

typedef struct VhostUserGpuDMABUFScanout2 {
    struct VhostUserGpuDMABUFScanout dmabuf_scanout;
    uint64_t modifier;
} QEMU_PACKED VhostUserGpuDMABUFScanout2;

typedef struct VhostUserGpuEdidRequest {
    uint32_t scanout_id;
} QEMU_PACKED VhostUserGpuEdidRequest;

typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        VhostUserGpuDMABUFScanout2 dmabuf_scanout2;
        VhostUserGpuEdidRequest edid_req;
        struct virtio_gpu_resp_edid resp_edid;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
#define VHOST_USER_GPU_PROTOCOL_F_DMABUF2 1

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);

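/*
 * Handle cursor messages from the backend: CURSOR_UPDATE redefines the
 * cursor image and hotspot, while CURSOR_POS / CURSOR_POS_HIDE move or
 * hide it on the scanout's console.
 */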
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}

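/* Send a message (header plus payload) to the backend over the channel. */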
static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}

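/*
 * Reply to a DMABUF_UPDATE: the backend blocks until this acknowledgment
 * arrives, so it must be sent once the display is done with the update.
 */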
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}

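/*
 * Dispatch a display message from the backend: protocol feature
 * negotiation, display-info/EDID queries, scanout (re)configuration and
 * screen updates.
 */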
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
            .payload = {
                .u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID) |
                       (1 << VHOST_USER_GPU_PROTOCOL_F_DMABUF2)
            }
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_GET_EDID: {
        VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
        struct virtio_gpu_resp_edid resp = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(reply.payload.resp_edid),
        };

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            break;
        }

        resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
        virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
        memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->width == 0) {
            dpy_gfx_replace_surface(con, NULL);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT2:
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        uint32_t offset = 0;
        uint32_t stride = m->fd_stride;
        uint64_t modifier = DRM_FORMAT_MOD_INVALID;
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = g->dmabuf[m->scanout_id];

        if (dmabuf) {
            qemu_dmabuf_close(dmabuf);
            dpy_gl_release_dmabuf(con, dmabuf);
            g_clear_pointer(&dmabuf, qemu_dmabuf_free);
        }

        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            g->dmabuf[m->scanout_id] = NULL;
            break;
        }

        if (msg->request == VHOST_USER_GPU_DMABUF_SCANOUT2) {
            VhostUserGpuDMABUFScanout2 *m2 = &msg->payload.dmabuf_scanout2;
            modifier = m2->modifier;
        }

        dmabuf = qemu_dmabuf_new(m->width, m->height,
                                 &offset, &stride, 0, 0,
                                 m->fd_width, m->fd_height,
                                 m->fd_drm_fourcc, modifier,
                                 &fd, 1, false, m->fd_flags &
                                 VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP);

        dpy_gl_scanout_dmabuf(con, dmabuf);
        g->dmabuf[m->scanout_id] = dmabuf;
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
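        /*
         * The backend waits for our reply before sending further commands;
         * it is unblocked from vhost_user_gpu_gl_flushed() once the GL
         * flush completes.
         */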
        g->backend_blocked = true;
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        break;
    }
#ifdef CONFIG_PIXMAN
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
#endif
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}

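/*
 * Read one message from the backend: the three header fields first, then
 * the payload, and dispatch it to the cursor or display handler.
 */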
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}

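/*
 * Install or remove the read handler on the channel; while blocked,
 * incoming backend messages are left unread in the socket.
 */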
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

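/*
 * Called by the display core once the console's GL operations have been
 * flushed: acknowledge a pending DMABUF_UPDATE and resume reading.
 */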
static void
vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(g);
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(g, false);
}

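/*
 * Create the vhost-user-gpu channel: a socketpair whose first end is
 * wrapped in a chardev frontend for QEMU and whose second end is handed
 * to the backend through vhost_user_gpu_set_socket().
 */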
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}

static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    Error *local_err = NULL;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config),
                               &local_err);
    if (ret) {
        error_report_err(local_err);
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_FRONTEND);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}

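/*
 * On DRIVER_OK with the VM running, connect the channel and start the
 * backend; on any other status, tear the channel down and stop it.
 */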
static int
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return 0;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        int ret;

        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        ret = vhost_user_backend_stop(g->vhost);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) identifies the config interrupt; it is
     * not handled through vhost for this device, so report it as not
     * pending.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }
    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) identifies the config interrupt; it is
     * not handled through vhost for this device, so there is nothing to
     * mask.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }
    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev");
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};

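/*
 * Realize: initialize the vhost-user backend with its two virtqueues
 * (control and cursor), then mirror the backend's feature bits into the
 * virtio-gpu configuration flags before realizing the base device.
 */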
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    /* existing backend may send DMABUF, so let's add that requirement */
    g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    } else {
        error_report("EDID requested but the backend doesn't support it.");
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    }
    if (virtio_has_feature(g->vhost->dev.features,
                           VIRTIO_GPU_F_RESOURCE_UUID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED;
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    return g->vhost ? &g->vhost->dev : NULL;
}

static const Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = vhost_user_gpu_gl_flushed;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;
    vdc->get_vhost = vhost_user_gpu_get_vhost;

    device_class_set_props(dc, vhost_user_gpu_properties);
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};
module_obj(TYPE_VHOST_USER_GPU);
module_kconfig(VHOST_USER_GPU);

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)