/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

/*
 * Block control-queue processing until the frontend acknowledges the
 * last VHOST_USER_GPU_DMABUF_UPDATE message (see source_wait_cb).
 */
void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}


static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    g_free(res->iov);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    /*     break */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return G_SOURCE_CONTINUE;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
1152 { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps, 1153 "Print capabilities", NULL }, 1154 { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum, 1155 "Use inherited fd socket", "FDNUM" }, 1156 { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path, 1157 "Use UNIX socket path", "PATH" }, 1158 { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node, 1159 "Specify DRM render node", "PATH" }, 1160 { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl, 1161 "Turn virgl rendering on", NULL }, 1162 { NULL, } 1163 }; 1164 1165 int 1166 main(int argc, char *argv[]) 1167 { 1168 GOptionContext *context; 1169 GError *error = NULL; 1170 GMainLoop *loop = NULL; 1171 int fd; 1172 VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 }; 1173 1174 QTAILQ_INIT(&g.reslist); 1175 QTAILQ_INIT(&g.fenceq); 1176 1177 context = g_option_context_new("QEMU vhost-user-gpu"); 1178 g_option_context_add_main_entries(context, entries, NULL); 1179 if (!g_option_context_parse(context, &argc, &argv, &error)) { 1180 g_printerr("Option parsing failed: %s\n", error->message); 1181 exit(EXIT_FAILURE); 1182 } 1183 g_option_context_free(context); 1184 1185 if (opt_print_caps) { 1186 g_print("{\n"); 1187 g_print(" \"type\": \"gpu\",\n"); 1188 g_print(" \"features\": [\n"); 1189 g_print(" \"render-node\",\n"); 1190 g_print(" \"virgl\"\n"); 1191 g_print(" ]\n"); 1192 g_print("}\n"); 1193 exit(EXIT_SUCCESS); 1194 } 1195 1196 g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node); 1197 if (opt_render_node && g.drm_rnode_fd == -1) { 1198 g_printerr("Failed to open DRM rendernode.\n"); 1199 exit(EXIT_FAILURE); 1200 } 1201 1202 vugbm_device_init(&g.gdev, g.drm_rnode_fd); 1203 1204 if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) { 1205 g_printerr("Please specify either --fd or --socket-path\n"); 1206 exit(EXIT_FAILURE); 1207 } 1208 1209 if (opt_socket_path) { 1210 int lsock = unix_listen(opt_socket_path, &error_fatal); 1211 if (lsock < 0) { 1212 g_printerr("Failed to listen on %s.\n", opt_socket_path); 1213 exit(EXIT_FAILURE); 1214 } 1215 fd = accept(lsock, NULL, NULL); 1216 close(lsock); 1217 } else { 1218 fd = opt_fdnum; 1219 } 1220 if (fd == -1) { 1221 g_printerr("Invalid vhost-user socket.\n"); 1222 exit(EXIT_FAILURE); 1223 } 1224 1225 if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) { 1226 g_printerr("Failed to initialize libvhost-user-glib.\n"); 1227 exit(EXIT_FAILURE); 1228 } 1229 1230 loop = g_main_loop_new(NULL, FALSE); 1231 g_main_loop_run(loop); 1232 g_main_loop_unref(loop); 1233 1234 vg_destroy(&g); 1235 if (g.drm_rnode_fd >= 0) { 1236 close(g.drm_rnode_fd); 1237 } 1238 1239 return 0; 1240 } 1241