/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"

#include "ui/egl-helpers.h"

#include <virglrenderer.h>

struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;
    MemoryRegion *mr;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    return container_of(res, struct virtio_gpu_virgl_resource, base);
}

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif

#if VIRGL_VERSION_MAJOR >= 1
struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    bool finish_unmapping;
};

static struct virtio_gpu_virgl_hostmem_region *
to_hostmem_region(MemoryRegion *mr)
{
    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
}

static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_process_cmdq(g);
}

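/*
 * MemoryRegion "free" callback of the hostmem blob mapping. It runs once
 * the last reference to the region has been dropped (from RCU context),
 * so instead of unmapping here it marks the region as ready for the final
 * unmap, unblocks the renderer and resumes command processing via a BH on
 * the main loop, where virglrenderer can be used safely.
 */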
static void virtio_gpu_virgl_hostmem_region_free(void *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b;
    VirtIOGPUGL *gl;

    vmr = to_hostmem_region(mr);
    vmr->finish_unmapping = true;

    b = VIRTIO_GPU_BASE(vmr->g);
    b->renderer_blocked--;

    /*
     * memory_region_unref() is executed from RCU thread context, while
     * virglrenderer works only on the main-loop thread that's holding GL
     * context.
     */
    gl = VIRTIO_GPU_GL(vmr->g);
    qemu_bh_schedule(gl->cmdq_resume_bh);
}

static int
virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
                                   struct virtio_gpu_virgl_resource *res,
                                   uint64_t offset)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
        return -EOPNOTSUPP;
    }

    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
                      __func__, strerror(-ret));
        return ret;
    }

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    vmr->g = g;

    mr = &vmr->mr;
    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
    memory_region_add_subregion(&b->hostmem, offset, mr);
    memory_region_set_enabled(mr, true);

    /*
     * MR could outlive the resource if MR's reference is held outside of
     * virtio-gpu. In order to prevent unmapping resource while MR is alive,
     * and thus, making the data pointer invalid, we will block virtio-gpu
     * command processing until MR is fully unreferenced and freed.
     */
    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;

    res->mr = mr;

    return 0;
}

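/*
 * Counterpart of virtio_gpu_virgl_map_resource_blob(). Unmapping happens
 * asynchronously: when it cannot complete immediately, *cmd_suspended is
 * set and the command is re-run once the memory region has been released
 * (see the step-by-step comment below).
 */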
static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (!mr) {
        return 0;
    }

    vmr = to_hostmem_region(res->mr);

    /*
     * Perform async unmapping in 3 steps:
     *
     * 1. Begin async unmapping with memory_region_del_subregion()
     *    and suspend/block cmd processing.
     * 2. Wait for res->mr to be freed and cmd processing resumed
     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
     */
    if (vmr->finish_unmapping) {
        res->mr = NULL;
        g_free(vmr);

        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    } else {
        *cmd_suspended = true;

        /* render will be unblocked once MR is freed */
        b->renderer_blocked++;

        /* memory region owns self res->mr object and frees it by itself */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
    }

    return 0;
}
#endif

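/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: register the resource on the host and
 * create the matching virgl 2D texture resource.
 */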
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c2d.width;
    res->base.height = c2d.height;
    res->base.format = c2d.format;
    res->base.resource_id = c2d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    if (c3d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c3d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c3d.width;
    res->base.height = c3d.height;
    res->base.format = c3d.format;
    res->base.resource_id = c3d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

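/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: detach any guest backing pages, drop the
 * virgl resource and free the host-side bookkeeping entry. A blob resource
 * that is still mapped is unmapped first, which may suspend the command.
 */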
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_resource_unref unref;
    struct virtio_gpu_virgl_resource *res;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (*cmd_suspended) {
        return;
    }
#endif

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);

    QTAILQ_REMOVE(&g->reslist, &res->base, next);

    g_free(res);
}

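/*
 * VIRTIO_GPU_CMD_CTX_CREATE: create a virgl rendering context, using the
 * flags-aware virglrenderer entry point when the guest supplies
 * context_init flags and the feature is enabled.
 */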
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    if (cc.context_init) {
        if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled",
                          __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

#if VIRGL_VERSION_MAJOR >= 1
        virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
                                                 cc.context_init,
                                                 cc.nlen,
                                                 cc.debug_name);
        return;
#endif
    }

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

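/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: show the texture backing a virgl resource on
 * the given display; a zero resource id (or an empty rect) disables the
 * scanout instead.
 */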
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#if VIRGL_VERSION_MAJOR >= 1
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

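    /* virglrenderer expects the command buffer length in dwords */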
    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}


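/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: translate the guest's scatter
 * list into an iovec and hand it to virglrenderer as the resource's
 * backing storage.
 */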
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0)
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}


static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

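/*
 * VIRTIO_GPU_CMD_GET_CAPSET_INFO: translate the guest's capset index into
 * a capset id and report the maximum version and size supported by
 * virglrenderer; an out-of-range index yields an all-zero answer.
 */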
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));

    if (info.capset_index < g->capset_ids->len) {
        resp.capset_id = g_array_index(g->capset_ids, uint32_t,
                                       info.capset_index);
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

#if VIRGL_VERSION_MAJOR >= 1
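/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a blob resource. Guest
 * memory backed blobs get their pages mapped into an iovec first; the fd
 * reported back by virglrenderer is stored so the blob can later be
 * scanned out as a dmabuf.
 */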
static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
    g_autofree struct virtio_gpu_virgl_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct virgl_renderer_resource_info info;
    int ret;

    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.resource_id = cblob.resource_id;
    res->base.blob_size = cblob.size;
    res->base.dmabuf_fd = -1;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                            cmd, &res->base.addrs,
                                            &res->base.iov, &res->base.iov_cnt);
        /* virtio_gpu_create_mapping_iov() returns 0 on success */
        if (ret) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
    }

    virgl_args.res_handle = cblob.resource_id;
    virgl_args.ctx_id = cblob.hdr.ctx_id;
    virgl_args.blob_mem = cblob.blob_mem;
    virgl_args.blob_id = cblob.blob_id;
    virgl_args.blob_flags = cblob.blob_flags;
    virgl_args.size = cblob.size;
    virgl_args.iovecs = res->base.iov;
    virgl_args.num_iovs = res->base.iov_cnt;

    ret = virgl_renderer_resource_create_blob(&virgl_args);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
                      __func__, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        return;
    }

    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource does not have info %d: %s\n",
                      __func__, cblob.resource_id, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        virgl_renderer_resource_unref(cblob.resource_id);
        return;
    }

    res->base.dmabuf_fd = info.fd;

    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
    res = NULL;
}

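/*
 * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB: map a blob resource into the hostmem
 * region at the guest-requested offset and return the mapping attributes.
 */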
static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_resp_map_info resp;
    int ret;

    VIRTIO_GPU_FILL_CMD(mblob);
    virtio_gpu_map_blob_bswap(&mblob);

    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, mblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    memset(&resp, 0, sizeof(resp));
    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd,
                                          bool *cmd_suspended)
{
    struct virtio_gpu_resource_unmap_blob ublob;
    struct virtio_gpu_virgl_resource *res;
    int ret;

    VIRTIO_GPU_FILL_CMD(ublob);
    virtio_gpu_unmap_blob_bswap(&ublob);

    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ublob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

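/*
 * VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: validate the framebuffer layout
 * supplied by the guest and present the dmabuf-backed blob resource on the
 * requested scanout.
 */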
static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    if (ss.width < 16 ||
        ss.height < 16 ||
        ss.r.x + ss.r.width > ss.width ||
        ss.r.y + ss.r.height > ss.height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id,
                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
                      ss.width, ss.height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    if (res->base.dmabuf_fd < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: pixel format not supported %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->base.blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
}
#endif

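/*
 * Main control-queue dispatcher of the virgl backend: decode the command
 * header, run the matching handler, then either complete the command,
 * leave it suspended, or install a fence that completes it later.
 */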
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    bool cmd_suspended = false;

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
#if VIRGL_VERSION_MAJOR >= 1
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        virgl_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        virgl_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        virgl_cmd_set_scanout_blob(g, cmd);
        break;
#endif
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd_suspended || cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

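/*
 * virglrenderer fence callback: the host GPU reached the given fence, so
 * retire every queued command whose fence_id is not newer than it.
 */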
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version = 1,
    .write_fence = virgl_write_fence,
    .create_gl_context = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}

int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                  virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       virtio_gpu_print_stats, g);
        timer_mod(gl->print_stats,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

#if VIRGL_VERSION_MAJOR >= 1
    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                    virtio_gpu_virgl_resume_cmdq_bh,
                                    g);
#endif

    return 0;
}

static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
{
    g_array_append_val(capset_ids, capset_id);
}

GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;
    GArray *capset_ids;

    capset_ids = g_array_new(false, false, sizeof(uint32_t));

    /* VIRGL is always supported. */
    virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);
    if (capset2_max_ver) {
        virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
    }

    return capset_ids;
}