/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/log.h"
#include "qapi/error.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        if (_g->use_virgl_renderer) {                   \
            _virgl(__VA_ARGS__);                        \
        } else {                                        \
            _simple(__VA_ARGS__);                       \
        }                                               \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

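/*
 * Virtio config space accessors.  The guest-visible virtio_gpu_config
 * carries the scanout count and pending event bits; the only guest-
 * writable field is events_clear, which acts as write-one-to-clear
 * against events_read.
 */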
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

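/*
 * Map a virtio-gpu format to the equivalent pixman format.  virtio-gpu
 * formats name the byte order in memory (little endian, fourcc style),
 * while pixman formats are packed in host byte order, so the mapping
 * differs between little and big endian hosts.
 */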
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

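/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: drop the host-side resource, including
 * any guest pages still mapped as backing store.
 */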
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

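/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: propagate a dirty rectangle to every
 * scanout showing the resource.  The flush rectangle is intersected
 * with each scanout's rectangle and translated into console
 * coordinates before calling dpy_gfx_update().
 */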
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

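/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource (or none) to a display.
 * resource_id == 0 disables the scanout; disabling scanout 0 is
 * rejected so the primary console always keeps a surface.  Otherwise
 * the resource's pixman image is wrapped in a new DisplaySurface
 * whenever the backing pointer or geometry changed.
 */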
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

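/*
 * Decode the common header and dispatch a control-queue command to the
 * matching 2D handler.  Handlers that produce no explicit response get
 * a NODATA reply (OK or the recorded error) appended here.
 */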
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

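/*
 * Cursor queue handler.  Each element carries one
 * virtio_gpu_update_cursor request; cursor commands are processed
 * synchronously and complete without a response payload.
 */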
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    g->renderer_blocked = block;
    if (!block) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
    .name = "virtio-gpu",
    .unmigratable = 1,
};

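/*
 * Realize: register the virtio device, allocate the control and cursor
 * queues (a larger control queue is used in virgl/3D mode), and create
 * one QEMU console per configured output.
 */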
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

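/* The 3D (virgl) command structs are part of the same guest ABI. */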
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);