xref: /qemu/hw/display/virtio-gpu-virgl.c (revision 7c092f17cceef10258ed23006b40e19b14996471)
19d9e1521SGerd Hoffmann /*
29d9e1521SGerd Hoffmann  * Virtio GPU Device
39d9e1521SGerd Hoffmann  *
49d9e1521SGerd Hoffmann  * Copyright Red Hat, Inc. 2013-2014
59d9e1521SGerd Hoffmann  *
69d9e1521SGerd Hoffmann  * Authors:
79d9e1521SGerd Hoffmann  *     Dave Airlie <airlied@redhat.com>
89d9e1521SGerd Hoffmann  *     Gerd Hoffmann <kraxel@redhat.com>
99d9e1521SGerd Hoffmann  *
109d9e1521SGerd Hoffmann  * This work is licensed under the terms of the GNU GPL, version 2 or later.
119d9e1521SGerd Hoffmann  * See the COPYING file in the top-level directory.
129d9e1521SGerd Hoffmann  */
139d9e1521SGerd Hoffmann 
149b8bfe21SPeter Maydell #include "qemu/osdep.h"
155feed38cSThomas Huth #include "qemu/error-report.h"
169d9e1521SGerd Hoffmann #include "qemu/iov.h"
179d9e1521SGerd Hoffmann #include "trace.h"
189d9e1521SGerd Hoffmann #include "hw/virtio/virtio.h"
199d9e1521SGerd Hoffmann #include "hw/virtio/virtio-gpu.h"
20*7c092f17SRobert Beckett #include "hw/virtio/virtio-gpu-bswap.h"
21*7c092f17SRobert Beckett #include "hw/virtio/virtio-gpu-pixman.h"
229d9e1521SGerd Hoffmann 
23e8a2db94SMarc-André Lureau #include "ui/egl-helpers.h"
24e8a2db94SMarc-André Lureau 
25a9c94277SMarkus Armbruster #include <virglrenderer.h>
269d9e1521SGerd Hoffmann 
/*
 * Per-resource state for the virgl backend: extends the common simple
 * resource with the memory region used when a blob resource is mapped
 * into the hostmem bar.
 */
struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;
    MemoryRegion *mr;   /* non-NULL while the blob mapping is alive */
};
31df4c498eSHuang Rui 
32df4c498eSHuang Rui static struct virtio_gpu_virgl_resource *
33df4c498eSHuang Rui virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
34df4c498eSHuang Rui {
35df4c498eSHuang Rui     struct virtio_gpu_simple_resource *res;
36df4c498eSHuang Rui 
37df4c498eSHuang Rui     res = virtio_gpu_find_resource(g, resource_id);
38df4c498eSHuang Rui     if (!res) {
39df4c498eSHuang Rui         return NULL;
40df4c498eSHuang Rui     }
41df4c498eSHuang Rui 
42df4c498eSHuang Rui     return container_of(res, struct virtio_gpu_virgl_resource, base);
43df4c498eSHuang Rui }
44df4c498eSHuang Rui 
#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
/* virglrenderer callback: return QEMU's EGL display (cookie is unused). */
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif
529d9e1521SGerd Hoffmann 
#if VIRGL_VERSION_MAJOR >= 1
/*
 * RAM memory region backed by a mapped virgl blob resource.  The
 * MemoryRegion is embedded so that container_of() can recover this
 * wrapper from a plain MemoryRegion pointer.
 */
struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    bool finish_unmapping; /* set once the MR has been fully unreferenced */
};
59*7c092f17SRobert Beckett 
/* Recover the hostmem region wrapper from its embedded MemoryRegion. */
static struct virtio_gpu_virgl_hostmem_region *
to_hostmem_region(MemoryRegion *mr)
{
    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
}
65*7c092f17SRobert Beckett 
/*
 * Bottom half that resumes virtio-gpu command processing on the
 * main-loop thread after an async blob unmap has completed.
 */
static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_process_cmdq(g);
}
72*7c092f17SRobert Beckett 
/*
 * Object free callback for a blob MemoryRegion.  Marks the region as
 * fully unmapped, unblocks the renderer, and defers resumption of
 * command processing to a bottom half (see comment below for why).
 */
static void virtio_gpu_virgl_hostmem_region_free(void *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b;
    VirtIOGPUGL *gl;

    vmr = to_hostmem_region(mr);
    vmr->finish_unmapping = true;

    b = VIRTIO_GPU_BASE(vmr->g);
    b->renderer_blocked--;

    /*
     * memory_region_unref() is executed from RCU thread context, while
     * virglrenderer works only on the main-loop thread that's holding GL
     * context.
     */
    gl = VIRTIO_GPU_GL(vmr->g);
    qemu_bh_schedule(gl->cmdq_resume_bh);
}
94*7c092f17SRobert Beckett 
/*
 * Map a blob resource into the hostmem bar at @offset.
 *
 * On success, a RAM memory region backed by the virgl mapping is added
 * as a subregion of hostmem and recorded in res->mr.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int
virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
                                   struct virtio_gpu_virgl_resource *res,
                                   uint64_t offset)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
        return -EOPNOTSUPP;
    }

    /* data/size describe the host mapping created by virglrenderer */
    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
                      __func__, strerror(-ret));
        return ret;
    }

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    vmr->g = g;

    /* the MR owns itself; it is released via object_unparent() on unmap */
    mr = &vmr->mr;
    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
    memory_region_add_subregion(&b->hostmem, offset, mr);
    memory_region_set_enabled(mr, true);

    /*
     * MR could outlive the resource if MR's reference is held outside of
     * virtio-gpu. In order to prevent unmapping resource while MR is alive,
     * and thus, making the data pointer invalid, we will block virtio-gpu
     * command processing until MR is fully unreferenced and freed.
     */
    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;

    res->mr = mr;

    return 0;
}
139*7c092f17SRobert Beckett 
/*
 * Unmap a previously mapped blob resource.
 *
 * Unmapping is asynchronous: on the first call the memory region is
 * torn down and *cmd_suspended is set so the caller retries the command
 * later; once the MR has been freed, a subsequent call performs the
 * final virgl_renderer_resource_unmap().
 *
 * Returns 0 on success (or when there is nothing to unmap), a negative
 * errno value on failure.
 */
static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (!mr) {
        /* resource was never mapped */
        return 0;
    }

    vmr = to_hostmem_region(res->mr);

    /*
     * Perform async unmapping in 3 steps:
     *
     * 1. Begin async unmapping with memory_region_del_subregion()
     *    and suspend/block cmd processing.
     * 2. Wait for res->mr to be freed and cmd processing resumed
     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
     */
    if (vmr->finish_unmapping) {
        res->mr = NULL;
        g_free(vmr);

        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    } else {
        *cmd_suspended = true;

        /* render will be unblocked once MR is freed */
        b->renderer_blocked++;

        /* memory region owns self res->mr object and frees it by itself */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
    }

    return 0;
}
190*7c092f17SRobert Beckett #endif
191*7c092f17SRobert Beckett 
1929d9e1521SGerd Hoffmann static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
1939d9e1521SGerd Hoffmann                                          struct virtio_gpu_ctrl_command *cmd)
1949d9e1521SGerd Hoffmann {
1959d9e1521SGerd Hoffmann     struct virtio_gpu_resource_create_2d c2d;
1969d9e1521SGerd Hoffmann     struct virgl_renderer_resource_create_args args;
197df4c498eSHuang Rui     struct virtio_gpu_virgl_resource *res;
1989d9e1521SGerd Hoffmann 
1999d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(c2d);
2009d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
2019d9e1521SGerd Hoffmann                                        c2d.width, c2d.height);
2029d9e1521SGerd Hoffmann 
203df4c498eSHuang Rui     if (c2d.resource_id == 0) {
204df4c498eSHuang Rui         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
205df4c498eSHuang Rui                       __func__);
206df4c498eSHuang Rui         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
207df4c498eSHuang Rui         return;
208df4c498eSHuang Rui     }
209df4c498eSHuang Rui 
210df4c498eSHuang Rui     res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
211df4c498eSHuang Rui     if (res) {
212df4c498eSHuang Rui         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
213df4c498eSHuang Rui                       __func__, c2d.resource_id);
214df4c498eSHuang Rui         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
215df4c498eSHuang Rui         return;
216df4c498eSHuang Rui     }
217df4c498eSHuang Rui 
218df4c498eSHuang Rui     res = g_new0(struct virtio_gpu_virgl_resource, 1);
219df4c498eSHuang Rui     res->base.width = c2d.width;
220df4c498eSHuang Rui     res->base.height = c2d.height;
221df4c498eSHuang Rui     res->base.format = c2d.format;
222df4c498eSHuang Rui     res->base.resource_id = c2d.resource_id;
223*7c092f17SRobert Beckett     res->base.dmabuf_fd = -1;
224df4c498eSHuang Rui     QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
225df4c498eSHuang Rui 
2269d9e1521SGerd Hoffmann     args.handle = c2d.resource_id;
2279d9e1521SGerd Hoffmann     args.target = 2;
2289d9e1521SGerd Hoffmann     args.format = c2d.format;
2299d9e1521SGerd Hoffmann     args.bind = (1 << 1);
2309d9e1521SGerd Hoffmann     args.width = c2d.width;
2319d9e1521SGerd Hoffmann     args.height = c2d.height;
2329d9e1521SGerd Hoffmann     args.depth = 1;
2339d9e1521SGerd Hoffmann     args.array_size = 1;
2349d9e1521SGerd Hoffmann     args.last_level = 0;
2359d9e1521SGerd Hoffmann     args.nr_samples = 0;
2369d9e1521SGerd Hoffmann     args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
2379d9e1521SGerd Hoffmann     virgl_renderer_resource_create(&args, NULL, 0);
2389d9e1521SGerd Hoffmann }
2399d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: create a host-tracked resource
 * record and the corresponding 3D virgl resource, forwarding the
 * guest-supplied creation parameters unchanged.
 */
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    if (c3d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c3d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c3d.width;
    res->base.height = c3d.height;
    res->base.format = c3d.format;
    res->base.resource_id = c3d.resource_id;
    res->base.dmabuf_fd = -1;   /* no dmabuf associated yet */
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
2879d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: destroy a resource, first unmapping
 * any blob mapping and detaching guest backing pages.  May set
 * *cmd_suspended to retry the command once an async blob unmap
 * completes.
 */
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_resource_unref unref;
    struct virtio_gpu_virgl_resource *res;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (*cmd_suspended) {
        /* unmap in progress; command will be re-processed later */
        return;
    }
#endif

    /* release any guest pages still attached as backing store */
    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);

    QTAILQ_REMOVE(&g->reslist, &res->base, next);

    g_free(res);
}
3309d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_CTX_CREATE: create a virgl rendering context.  When
 * the guest passes context_init flags, they are honored only if the
 * context-init feature is enabled and virglrenderer is new enough;
 * with an older virglrenderer the flags fall through to a plain
 * context create (presumably intentional fallback — note for review).
 */
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    if (cc.context_init) {
        if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled",
                          __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

#if VIRGL_VERSION_MAJOR >= 1
        virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
                                                 cc.context_init,
                                                 cc.nlen,
                                                 cc.debug_name);
        return;
#endif
    }

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}
3599d9e1521SGerd Hoffmann 
/* VIRTIO_GPU_CMD_CTX_DESTROY: destroy a virgl rendering context. */
static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
3709d9e1521SGerd Hoffmann 
3719d9e1521SGerd Hoffmann static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
3729d9e1521SGerd Hoffmann                                 int width, int height)
3739d9e1521SGerd Hoffmann {
37450d8e25eSMarc-André Lureau     if (!g->parent_obj.scanout[idx].con) {
3759d9e1521SGerd Hoffmann         return;
3769d9e1521SGerd Hoffmann     }
3779d9e1521SGerd Hoffmann 
37850d8e25eSMarc-André Lureau     dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
3799d9e1521SGerd Hoffmann }
3809d9e1521SGerd Hoffmann 
3819d9e1521SGerd Hoffmann static void virgl_cmd_resource_flush(VirtIOGPU *g,
3829d9e1521SGerd Hoffmann                                      struct virtio_gpu_ctrl_command *cmd)
3839d9e1521SGerd Hoffmann {
3849d9e1521SGerd Hoffmann     struct virtio_gpu_resource_flush rf;
3859d9e1521SGerd Hoffmann     int i;
3869d9e1521SGerd Hoffmann 
3879d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(rf);
3889d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_res_flush(rf.resource_id,
3899d9e1521SGerd Hoffmann                                    rf.r.width, rf.r.height, rf.r.x, rf.r.y);
3909d9e1521SGerd Hoffmann 
39150d8e25eSMarc-André Lureau     for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
39250d8e25eSMarc-André Lureau         if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
3939d9e1521SGerd Hoffmann             continue;
3949d9e1521SGerd Hoffmann         }
3959d9e1521SGerd Hoffmann         virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
3969d9e1521SGerd Hoffmann     }
3979d9e1521SGerd Hoffmann }
3989d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: attach a resource's texture to a scanout
 * (display), or disable the scanout when resource_id/size is zero.
 */
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

        /* newer virgl exposes extended info (e.g. D3D texture handle) */
#if VIRGL_VERSION_MAJOR >= 1
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        /* resource_id == 0 (or empty rect) disables the scanout */
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}
4549d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the guest's command stream out of the
 * request iovec and hand it to virglrenderer for execution.
 */
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        /* guest supplied fewer bytes than the header claims */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    /* virgl command stream is measured in dwords */
    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
4859d9e1521SGerd Hoffmann 
4869d9e1521SGerd Hoffmann static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
4879d9e1521SGerd Hoffmann                                           struct virtio_gpu_ctrl_command *cmd)
4889d9e1521SGerd Hoffmann {
4899d9e1521SGerd Hoffmann     struct virtio_gpu_transfer_to_host_2d t2d;
4909d9e1521SGerd Hoffmann     struct virtio_gpu_box box;
4919d9e1521SGerd Hoffmann 
4929d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(t2d);
4939d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
4949d9e1521SGerd Hoffmann 
4959d9e1521SGerd Hoffmann     box.x = t2d.r.x;
4969d9e1521SGerd Hoffmann     box.y = t2d.r.y;
4979d9e1521SGerd Hoffmann     box.z = 0;
4989d9e1521SGerd Hoffmann     box.w = t2d.r.width;
4999d9e1521SGerd Hoffmann     box.h = t2d.r.height;
5009d9e1521SGerd Hoffmann     box.d = 1;
5019d9e1521SGerd Hoffmann 
5029d9e1521SGerd Hoffmann     virgl_renderer_transfer_write_iov(t2d.resource_id,
5039d9e1521SGerd Hoffmann                                       0,
5049d9e1521SGerd Hoffmann                                       0,
5059d9e1521SGerd Hoffmann                                       0,
5069d9e1521SGerd Hoffmann                                       0,
5079d9e1521SGerd Hoffmann                                       (struct virgl_box *)&box,
5089d9e1521SGerd Hoffmann                                       t2d.offset, NULL, 0);
5099d9e1521SGerd Hoffmann }
5109d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D: upload guest backing data into a
 * 3D resource region described by the guest's box/stride parameters.
 */
static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
5279d9e1521SGerd Hoffmann 
/*
 * VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D: read a 3D resource region back
 * into the guest's attached backing pages.
 */
static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
5459d9e1521SGerd Hoffmann 
5469d9e1521SGerd Hoffmann 
5479d9e1521SGerd Hoffmann static void virgl_resource_attach_backing(VirtIOGPU *g,
5489d9e1521SGerd Hoffmann                                           struct virtio_gpu_ctrl_command *cmd)
5499d9e1521SGerd Hoffmann {
5509d9e1521SGerd Hoffmann     struct virtio_gpu_resource_attach_backing att_rb;
5519d9e1521SGerd Hoffmann     struct iovec *res_iovs;
5529049f8bcSGerd Hoffmann     uint32_t res_niov;
5539d9e1521SGerd Hoffmann     int ret;
5549d9e1521SGerd Hoffmann 
5559d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(att_rb);
5569d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
5579d9e1521SGerd Hoffmann 
55870d37662SVivek Kasireddy     ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
55970d37662SVivek Kasireddy                                         cmd, NULL, &res_iovs, &res_niov);
5609d9e1521SGerd Hoffmann     if (ret != 0) {
5619d9e1521SGerd Hoffmann         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
5629d9e1521SGerd Hoffmann         return;
5639d9e1521SGerd Hoffmann     }
5649d9e1521SGerd Hoffmann 
56533243031SLi Qiang     ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
5669049f8bcSGerd Hoffmann                                              res_iovs, res_niov);
56733243031SLi Qiang 
56833243031SLi Qiang     if (ret != 0)
5699049f8bcSGerd Hoffmann         virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
5709d9e1521SGerd Hoffmann }
5719d9e1521SGerd Hoffmann 
5729d9e1521SGerd Hoffmann static void virgl_resource_detach_backing(VirtIOGPU *g,
5739d9e1521SGerd Hoffmann                                           struct virtio_gpu_ctrl_command *cmd)
5749d9e1521SGerd Hoffmann {
5759d9e1521SGerd Hoffmann     struct virtio_gpu_resource_detach_backing detach_rb;
5769d9e1521SGerd Hoffmann     struct iovec *res_iovs = NULL;
5779d9e1521SGerd Hoffmann     int num_iovs = 0;
5789d9e1521SGerd Hoffmann 
5799d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(detach_rb);
5809d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);
5819d9e1521SGerd Hoffmann 
5829d9e1521SGerd Hoffmann     virgl_renderer_resource_detach_iov(detach_rb.resource_id,
5839d9e1521SGerd Hoffmann                                        &res_iovs,
5849d9e1521SGerd Hoffmann                                        &num_iovs);
5859d9e1521SGerd Hoffmann     if (res_iovs == NULL || num_iovs == 0) {
5869d9e1521SGerd Hoffmann         return;
5879d9e1521SGerd Hoffmann     }
5883bb68f79SGerd Hoffmann     virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
5899d9e1521SGerd Hoffmann }
5909d9e1521SGerd Hoffmann 
5919d9e1521SGerd Hoffmann 
5929d9e1521SGerd Hoffmann static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
5939d9e1521SGerd Hoffmann                                           struct virtio_gpu_ctrl_command *cmd)
5949d9e1521SGerd Hoffmann {
5959d9e1521SGerd Hoffmann     struct virtio_gpu_ctx_resource att_res;
5969d9e1521SGerd Hoffmann 
5979d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(att_res);
5989d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
5999d9e1521SGerd Hoffmann                                         att_res.resource_id);
6009d9e1521SGerd Hoffmann 
6019d9e1521SGerd Hoffmann     virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
6029d9e1521SGerd Hoffmann }
6039d9e1521SGerd Hoffmann 
6049d9e1521SGerd Hoffmann static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
6059d9e1521SGerd Hoffmann                                           struct virtio_gpu_ctrl_command *cmd)
6069d9e1521SGerd Hoffmann {
6079d9e1521SGerd Hoffmann     struct virtio_gpu_ctx_resource det_res;
6089d9e1521SGerd Hoffmann 
6099d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(det_res);
6109d9e1521SGerd Hoffmann     trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
6119d9e1521SGerd Hoffmann                                         det_res.resource_id);
6129d9e1521SGerd Hoffmann 
6139d9e1521SGerd Hoffmann     virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
6149d9e1521SGerd Hoffmann }
6159d9e1521SGerd Hoffmann 
6169d9e1521SGerd Hoffmann static void virgl_cmd_get_capset_info(VirtIOGPU *g,
6179d9e1521SGerd Hoffmann                                       struct virtio_gpu_ctrl_command *cmd)
6189d9e1521SGerd Hoffmann {
6199d9e1521SGerd Hoffmann     struct virtio_gpu_get_capset_info info;
6209d9e1521SGerd Hoffmann     struct virtio_gpu_resp_capset_info resp;
6219d9e1521SGerd Hoffmann 
6229d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(info);
6239d9e1521SGerd Hoffmann 
62442a8dadcSLi Qiang     memset(&resp, 0, sizeof(resp));
6259d9e1521SGerd Hoffmann     if (info.capset_index == 0) {
6269d9e1521SGerd Hoffmann         resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
6279d9e1521SGerd Hoffmann         virgl_renderer_get_cap_set(resp.capset_id,
6289d9e1521SGerd Hoffmann                                    &resp.capset_max_version,
6299d9e1521SGerd Hoffmann                                    &resp.capset_max_size);
6305643cc94SDave Airlie     } else if (info.capset_index == 1) {
6315643cc94SDave Airlie         resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
6325643cc94SDave Airlie         virgl_renderer_get_cap_set(resp.capset_id,
6335643cc94SDave Airlie                                    &resp.capset_max_version,
6345643cc94SDave Airlie                                    &resp.capset_max_size);
6359d9e1521SGerd Hoffmann     } else {
6369d9e1521SGerd Hoffmann         resp.capset_max_version = 0;
6379d9e1521SGerd Hoffmann         resp.capset_max_size = 0;
6389d9e1521SGerd Hoffmann     }
6399d9e1521SGerd Hoffmann     resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
6409d9e1521SGerd Hoffmann     virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
6419d9e1521SGerd Hoffmann }
6429d9e1521SGerd Hoffmann 
6439d9e1521SGerd Hoffmann static void virgl_cmd_get_capset(VirtIOGPU *g,
6449d9e1521SGerd Hoffmann                                  struct virtio_gpu_ctrl_command *cmd)
6459d9e1521SGerd Hoffmann {
6469d9e1521SGerd Hoffmann     struct virtio_gpu_get_capset gc;
6479d9e1521SGerd Hoffmann     struct virtio_gpu_resp_capset *resp;
6489d9e1521SGerd Hoffmann     uint32_t max_ver, max_size;
6499d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(gc);
6509d9e1521SGerd Hoffmann 
6519d9e1521SGerd Hoffmann     virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
6529d9e1521SGerd Hoffmann                                &max_size);
653abd7f08bSPrasad J Pandit     if (!max_size) {
654abd7f08bSPrasad J Pandit         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
655abd7f08bSPrasad J Pandit         return;
656abd7f08bSPrasad J Pandit     }
6579d9e1521SGerd Hoffmann 
65885d9d044SLi Qiang     resp = g_malloc0(sizeof(*resp) + max_size);
6599d9e1521SGerd Hoffmann     resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
6609d9e1521SGerd Hoffmann     virgl_renderer_fill_caps(gc.capset_id,
6619d9e1521SGerd Hoffmann                              gc.capset_version,
6629d9e1521SGerd Hoffmann                              (void *)resp->capset_data);
6639d9e1521SGerd Hoffmann     virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
6649d9e1521SGerd Hoffmann     g_free(resp);
6659d9e1521SGerd Hoffmann }
6669d9e1521SGerd Hoffmann 
667*7c092f17SRobert Beckett #if VIRGL_VERSION_MAJOR >= 1
668*7c092f17SRobert Beckett static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
669*7c092f17SRobert Beckett                                            struct virtio_gpu_ctrl_command *cmd)
670*7c092f17SRobert Beckett {
671*7c092f17SRobert Beckett     struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
672*7c092f17SRobert Beckett     g_autofree struct virtio_gpu_virgl_resource *res = NULL;
673*7c092f17SRobert Beckett     struct virtio_gpu_resource_create_blob cblob;
674*7c092f17SRobert Beckett     struct virgl_renderer_resource_info info;
675*7c092f17SRobert Beckett     int ret;
676*7c092f17SRobert Beckett 
677*7c092f17SRobert Beckett     if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
678*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
679*7c092f17SRobert Beckett         return;
680*7c092f17SRobert Beckett     }
681*7c092f17SRobert Beckett 
682*7c092f17SRobert Beckett     VIRTIO_GPU_FILL_CMD(cblob);
683*7c092f17SRobert Beckett     virtio_gpu_create_blob_bswap(&cblob);
684*7c092f17SRobert Beckett     trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
685*7c092f17SRobert Beckett 
686*7c092f17SRobert Beckett     if (cblob.resource_id == 0) {
687*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
688*7c092f17SRobert Beckett                       __func__);
689*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
690*7c092f17SRobert Beckett         return;
691*7c092f17SRobert Beckett     }
692*7c092f17SRobert Beckett 
693*7c092f17SRobert Beckett     res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
694*7c092f17SRobert Beckett     if (res) {
695*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
696*7c092f17SRobert Beckett                       __func__, cblob.resource_id);
697*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
698*7c092f17SRobert Beckett         return;
699*7c092f17SRobert Beckett     }
700*7c092f17SRobert Beckett 
701*7c092f17SRobert Beckett     res = g_new0(struct virtio_gpu_virgl_resource, 1);
702*7c092f17SRobert Beckett     res->base.resource_id = cblob.resource_id;
703*7c092f17SRobert Beckett     res->base.blob_size = cblob.size;
704*7c092f17SRobert Beckett     res->base.dmabuf_fd = -1;
705*7c092f17SRobert Beckett 
706*7c092f17SRobert Beckett     if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
707*7c092f17SRobert Beckett         ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
708*7c092f17SRobert Beckett                                             cmd, &res->base.addrs,
709*7c092f17SRobert Beckett                                             &res->base.iov, &res->base.iov_cnt);
710*7c092f17SRobert Beckett         if (!ret) {
711*7c092f17SRobert Beckett             cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
712*7c092f17SRobert Beckett             return;
713*7c092f17SRobert Beckett         }
714*7c092f17SRobert Beckett     }
715*7c092f17SRobert Beckett 
716*7c092f17SRobert Beckett     virgl_args.res_handle = cblob.resource_id;
717*7c092f17SRobert Beckett     virgl_args.ctx_id = cblob.hdr.ctx_id;
718*7c092f17SRobert Beckett     virgl_args.blob_mem = cblob.blob_mem;
719*7c092f17SRobert Beckett     virgl_args.blob_id = cblob.blob_id;
720*7c092f17SRobert Beckett     virgl_args.blob_flags = cblob.blob_flags;
721*7c092f17SRobert Beckett     virgl_args.size = cblob.size;
722*7c092f17SRobert Beckett     virgl_args.iovecs = res->base.iov;
723*7c092f17SRobert Beckett     virgl_args.num_iovs = res->base.iov_cnt;
724*7c092f17SRobert Beckett 
725*7c092f17SRobert Beckett     ret = virgl_renderer_resource_create_blob(&virgl_args);
726*7c092f17SRobert Beckett     if (ret) {
727*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
728*7c092f17SRobert Beckett                       __func__, strerror(-ret));
729*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
730*7c092f17SRobert Beckett         virtio_gpu_cleanup_mapping(g, &res->base);
731*7c092f17SRobert Beckett         return;
732*7c092f17SRobert Beckett     }
733*7c092f17SRobert Beckett 
734*7c092f17SRobert Beckett     ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
735*7c092f17SRobert Beckett     if (ret) {
736*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR,
737*7c092f17SRobert Beckett                       "%s: resource does not have info %d: %s\n",
738*7c092f17SRobert Beckett                       __func__, cblob.resource_id, strerror(-ret));
739*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
740*7c092f17SRobert Beckett         virtio_gpu_cleanup_mapping(g, &res->base);
741*7c092f17SRobert Beckett         virgl_renderer_resource_unref(cblob.resource_id);
742*7c092f17SRobert Beckett         return;
743*7c092f17SRobert Beckett     }
744*7c092f17SRobert Beckett 
745*7c092f17SRobert Beckett     res->base.dmabuf_fd = info.fd;
746*7c092f17SRobert Beckett 
747*7c092f17SRobert Beckett     QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
748*7c092f17SRobert Beckett     res = NULL;
749*7c092f17SRobert Beckett }
750*7c092f17SRobert Beckett 
751*7c092f17SRobert Beckett static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
752*7c092f17SRobert Beckett                                         struct virtio_gpu_ctrl_command *cmd)
753*7c092f17SRobert Beckett {
754*7c092f17SRobert Beckett     struct virtio_gpu_resource_map_blob mblob;
755*7c092f17SRobert Beckett     struct virtio_gpu_virgl_resource *res;
756*7c092f17SRobert Beckett     struct virtio_gpu_resp_map_info resp;
757*7c092f17SRobert Beckett     int ret;
758*7c092f17SRobert Beckett 
759*7c092f17SRobert Beckett     VIRTIO_GPU_FILL_CMD(mblob);
760*7c092f17SRobert Beckett     virtio_gpu_map_blob_bswap(&mblob);
761*7c092f17SRobert Beckett 
762*7c092f17SRobert Beckett     res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
763*7c092f17SRobert Beckett     if (!res) {
764*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
765*7c092f17SRobert Beckett                       __func__, mblob.resource_id);
766*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
767*7c092f17SRobert Beckett         return;
768*7c092f17SRobert Beckett     }
769*7c092f17SRobert Beckett 
770*7c092f17SRobert Beckett     ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
771*7c092f17SRobert Beckett     if (ret) {
772*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
773*7c092f17SRobert Beckett         return;
774*7c092f17SRobert Beckett     }
775*7c092f17SRobert Beckett 
776*7c092f17SRobert Beckett     memset(&resp, 0, sizeof(resp));
777*7c092f17SRobert Beckett     resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
778*7c092f17SRobert Beckett     virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
779*7c092f17SRobert Beckett     virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
780*7c092f17SRobert Beckett }
781*7c092f17SRobert Beckett 
782*7c092f17SRobert Beckett static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
783*7c092f17SRobert Beckett                                           struct virtio_gpu_ctrl_command *cmd,
784*7c092f17SRobert Beckett                                           bool *cmd_suspended)
785*7c092f17SRobert Beckett {
786*7c092f17SRobert Beckett     struct virtio_gpu_resource_unmap_blob ublob;
787*7c092f17SRobert Beckett     struct virtio_gpu_virgl_resource *res;
788*7c092f17SRobert Beckett     int ret;
789*7c092f17SRobert Beckett 
790*7c092f17SRobert Beckett     VIRTIO_GPU_FILL_CMD(ublob);
791*7c092f17SRobert Beckett     virtio_gpu_unmap_blob_bswap(&ublob);
792*7c092f17SRobert Beckett 
793*7c092f17SRobert Beckett     res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
794*7c092f17SRobert Beckett     if (!res) {
795*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
796*7c092f17SRobert Beckett                       __func__, ublob.resource_id);
797*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
798*7c092f17SRobert Beckett         return;
799*7c092f17SRobert Beckett     }
800*7c092f17SRobert Beckett 
801*7c092f17SRobert Beckett     ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
802*7c092f17SRobert Beckett     if (ret) {
803*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
804*7c092f17SRobert Beckett         return;
805*7c092f17SRobert Beckett     }
806*7c092f17SRobert Beckett }
807*7c092f17SRobert Beckett 
808*7c092f17SRobert Beckett static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
809*7c092f17SRobert Beckett                                        struct virtio_gpu_ctrl_command *cmd)
810*7c092f17SRobert Beckett {
811*7c092f17SRobert Beckett     struct virtio_gpu_framebuffer fb = { 0 };
812*7c092f17SRobert Beckett     struct virtio_gpu_virgl_resource *res;
813*7c092f17SRobert Beckett     struct virtio_gpu_set_scanout_blob ss;
814*7c092f17SRobert Beckett     uint64_t fbend;
815*7c092f17SRobert Beckett 
816*7c092f17SRobert Beckett     VIRTIO_GPU_FILL_CMD(ss);
817*7c092f17SRobert Beckett     virtio_gpu_scanout_blob_bswap(&ss);
818*7c092f17SRobert Beckett     trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
819*7c092f17SRobert Beckett                                           ss.r.width, ss.r.height, ss.r.x,
820*7c092f17SRobert Beckett                                           ss.r.y);
821*7c092f17SRobert Beckett 
822*7c092f17SRobert Beckett     if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
823*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
824*7c092f17SRobert Beckett                       __func__, ss.scanout_id);
825*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
826*7c092f17SRobert Beckett         return;
827*7c092f17SRobert Beckett     }
828*7c092f17SRobert Beckett 
829*7c092f17SRobert Beckett     if (ss.resource_id == 0) {
830*7c092f17SRobert Beckett         virtio_gpu_disable_scanout(g, ss.scanout_id);
831*7c092f17SRobert Beckett         return;
832*7c092f17SRobert Beckett     }
833*7c092f17SRobert Beckett 
834*7c092f17SRobert Beckett     if (ss.width < 16 ||
835*7c092f17SRobert Beckett         ss.height < 16 ||
836*7c092f17SRobert Beckett         ss.r.x + ss.r.width > ss.width ||
837*7c092f17SRobert Beckett         ss.r.y + ss.r.height > ss.height) {
838*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
839*7c092f17SRobert Beckett                       " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
840*7c092f17SRobert Beckett                       __func__, ss.scanout_id, ss.resource_id,
841*7c092f17SRobert Beckett                       ss.r.x, ss.r.y, ss.r.width, ss.r.height,
842*7c092f17SRobert Beckett                       ss.width, ss.height);
843*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
844*7c092f17SRobert Beckett         return;
845*7c092f17SRobert Beckett     }
846*7c092f17SRobert Beckett 
847*7c092f17SRobert Beckett     res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
848*7c092f17SRobert Beckett     if (!res) {
849*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
850*7c092f17SRobert Beckett                       __func__, ss.resource_id);
851*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
852*7c092f17SRobert Beckett         return;
853*7c092f17SRobert Beckett     }
854*7c092f17SRobert Beckett     if (res->base.dmabuf_fd < 0) {
855*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
856*7c092f17SRobert Beckett                       __func__, ss.resource_id);
857*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
858*7c092f17SRobert Beckett         return;
859*7c092f17SRobert Beckett     }
860*7c092f17SRobert Beckett 
861*7c092f17SRobert Beckett     fb.format = virtio_gpu_get_pixman_format(ss.format);
862*7c092f17SRobert Beckett     if (!fb.format) {
863*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: pixel format not supported %d\n",
864*7c092f17SRobert Beckett                       __func__, ss.format);
865*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
866*7c092f17SRobert Beckett         return;
867*7c092f17SRobert Beckett     }
868*7c092f17SRobert Beckett 
869*7c092f17SRobert Beckett     fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
870*7c092f17SRobert Beckett     fb.width = ss.width;
871*7c092f17SRobert Beckett     fb.height = ss.height;
872*7c092f17SRobert Beckett     fb.stride = ss.strides[0];
873*7c092f17SRobert Beckett     fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
874*7c092f17SRobert Beckett 
875*7c092f17SRobert Beckett     fbend = fb.offset;
876*7c092f17SRobert Beckett     fbend += fb.stride * (ss.r.height - 1);
877*7c092f17SRobert Beckett     fbend += fb.bytes_pp * ss.r.width;
878*7c092f17SRobert Beckett     if (fbend > res->base.blob_size) {
879*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: fb end out of range\n",
880*7c092f17SRobert Beckett                       __func__);
881*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
882*7c092f17SRobert Beckett         return;
883*7c092f17SRobert Beckett     }
884*7c092f17SRobert Beckett 
885*7c092f17SRobert Beckett     g->parent_obj.enable = 1;
886*7c092f17SRobert Beckett     if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
887*7c092f17SRobert Beckett         qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
888*7c092f17SRobert Beckett                       __func__);
889*7c092f17SRobert Beckett         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
890*7c092f17SRobert Beckett         return;
891*7c092f17SRobert Beckett     }
892*7c092f17SRobert Beckett 
893*7c092f17SRobert Beckett     virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
894*7c092f17SRobert Beckett }
895*7c092f17SRobert Beckett #endif
896*7c092f17SRobert Beckett 
8979d9e1521SGerd Hoffmann void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
8989d9e1521SGerd Hoffmann                                       struct virtio_gpu_ctrl_command *cmd)
8999d9e1521SGerd Hoffmann {
900*7c092f17SRobert Beckett     bool cmd_suspended = false;
901*7c092f17SRobert Beckett 
9029d9e1521SGerd Hoffmann     VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
9039d9e1521SGerd Hoffmann 
9049d9e1521SGerd Hoffmann     virgl_renderer_force_ctx_0();
9059d9e1521SGerd Hoffmann     switch (cmd->cmd_hdr.type) {
9069d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_CTX_CREATE:
9079d9e1521SGerd Hoffmann         virgl_cmd_context_create(g, cmd);
9089d9e1521SGerd Hoffmann         break;
9099d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_CTX_DESTROY:
9109d9e1521SGerd Hoffmann         virgl_cmd_context_destroy(g, cmd);
9119d9e1521SGerd Hoffmann         break;
9129d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
9139d9e1521SGerd Hoffmann         virgl_cmd_create_resource_2d(g, cmd);
9149d9e1521SGerd Hoffmann         break;
9159d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
9169d9e1521SGerd Hoffmann         virgl_cmd_create_resource_3d(g, cmd);
9179d9e1521SGerd Hoffmann         break;
9189d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_SUBMIT_3D:
9199d9e1521SGerd Hoffmann         virgl_cmd_submit_3d(g, cmd);
9209d9e1521SGerd Hoffmann         break;
9219d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
9229d9e1521SGerd Hoffmann         virgl_cmd_transfer_to_host_2d(g, cmd);
9239d9e1521SGerd Hoffmann         break;
9249d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
9259d9e1521SGerd Hoffmann         virgl_cmd_transfer_to_host_3d(g, cmd);
9269d9e1521SGerd Hoffmann         break;
9279d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
9289d9e1521SGerd Hoffmann         virgl_cmd_transfer_from_host_3d(g, cmd);
9299d9e1521SGerd Hoffmann         break;
9309d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
9319d9e1521SGerd Hoffmann         virgl_resource_attach_backing(g, cmd);
9329d9e1521SGerd Hoffmann         break;
9339d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
9349d9e1521SGerd Hoffmann         virgl_resource_detach_backing(g, cmd);
9359d9e1521SGerd Hoffmann         break;
9369d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_SET_SCANOUT:
9379d9e1521SGerd Hoffmann         virgl_cmd_set_scanout(g, cmd);
9389d9e1521SGerd Hoffmann         break;
9399d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
9409d9e1521SGerd Hoffmann         virgl_cmd_resource_flush(g, cmd);
9419d9e1521SGerd Hoffmann         break;
9429d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_RESOURCE_UNREF:
943*7c092f17SRobert Beckett         virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
9449d9e1521SGerd Hoffmann         break;
9459d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
9469d9e1521SGerd Hoffmann         /* TODO add security */
9479d9e1521SGerd Hoffmann         virgl_cmd_ctx_attach_resource(g, cmd);
9489d9e1521SGerd Hoffmann         break;
9499d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
9509d9e1521SGerd Hoffmann         /* TODO add security */
9519d9e1521SGerd Hoffmann         virgl_cmd_ctx_detach_resource(g, cmd);
9529d9e1521SGerd Hoffmann         break;
9539d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
9549d9e1521SGerd Hoffmann         virgl_cmd_get_capset_info(g, cmd);
9559d9e1521SGerd Hoffmann         break;
9569d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_GET_CAPSET:
9579d9e1521SGerd Hoffmann         virgl_cmd_get_capset(g, cmd);
9589d9e1521SGerd Hoffmann         break;
9599d9e1521SGerd Hoffmann     case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
9609d9e1521SGerd Hoffmann         virtio_gpu_get_display_info(g, cmd);
9619d9e1521SGerd Hoffmann         break;
9621ed2cb32SGerd Hoffmann     case VIRTIO_GPU_CMD_GET_EDID:
9631ed2cb32SGerd Hoffmann         virtio_gpu_get_edid(g, cmd);
9641ed2cb32SGerd Hoffmann         break;
965*7c092f17SRobert Beckett #if VIRGL_VERSION_MAJOR >= 1
966*7c092f17SRobert Beckett     case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
967*7c092f17SRobert Beckett         virgl_cmd_resource_create_blob(g, cmd);
968*7c092f17SRobert Beckett         break;
969*7c092f17SRobert Beckett     case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
970*7c092f17SRobert Beckett         virgl_cmd_resource_map_blob(g, cmd);
971*7c092f17SRobert Beckett         break;
972*7c092f17SRobert Beckett     case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
973*7c092f17SRobert Beckett         virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
974*7c092f17SRobert Beckett         break;
975*7c092f17SRobert Beckett     case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
976*7c092f17SRobert Beckett         virgl_cmd_set_scanout_blob(g, cmd);
977*7c092f17SRobert Beckett         break;
978*7c092f17SRobert Beckett #endif
9799d9e1521SGerd Hoffmann     default:
9809d9e1521SGerd Hoffmann         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
9819d9e1521SGerd Hoffmann         break;
9829d9e1521SGerd Hoffmann     }
9839d9e1521SGerd Hoffmann 
984*7c092f17SRobert Beckett     if (cmd_suspended || cmd->finished) {
9859d9e1521SGerd Hoffmann         return;
9869d9e1521SGerd Hoffmann     }
9879d9e1521SGerd Hoffmann     if (cmd->error) {
9889d9e1521SGerd Hoffmann         fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
9899d9e1521SGerd Hoffmann                 cmd->cmd_hdr.type, cmd->error);
9909d9e1521SGerd Hoffmann         virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
9919d9e1521SGerd Hoffmann         return;
9929d9e1521SGerd Hoffmann     }
9939d9e1521SGerd Hoffmann     if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
9949d9e1521SGerd Hoffmann         virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
9959d9e1521SGerd Hoffmann         return;
9969d9e1521SGerd Hoffmann     }
9979d9e1521SGerd Hoffmann 
9989d9e1521SGerd Hoffmann     trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
9999d9e1521SGerd Hoffmann     virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
10009d9e1521SGerd Hoffmann }
10019d9e1521SGerd Hoffmann 
10029d9e1521SGerd Hoffmann static void virgl_write_fence(void *opaque, uint32_t fence)
10039d9e1521SGerd Hoffmann {
10049d9e1521SGerd Hoffmann     VirtIOGPU *g = opaque;
10059d9e1521SGerd Hoffmann     struct virtio_gpu_ctrl_command *cmd, *tmp;
10069d9e1521SGerd Hoffmann 
10079d9e1521SGerd Hoffmann     QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
10089d9e1521SGerd Hoffmann         /*
10099d9e1521SGerd Hoffmann          * the guest can end up emitting fences out of order
10109d9e1521SGerd Hoffmann          * so we should check all fenced cmds not just the first one.
10119d9e1521SGerd Hoffmann          */
10129d9e1521SGerd Hoffmann         if (cmd->cmd_hdr.fence_id > fence) {
10139d9e1521SGerd Hoffmann             continue;
10149d9e1521SGerd Hoffmann         }
10159d9e1521SGerd Hoffmann         trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
10169d9e1521SGerd Hoffmann         virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
10179d9e1521SGerd Hoffmann         QTAILQ_REMOVE(&g->fenceq, cmd, next);
10189d9e1521SGerd Hoffmann         g_free(cmd);
10199d9e1521SGerd Hoffmann         g->inflight--;
102050d8e25eSMarc-André Lureau         if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1021cd7ebf6bSDmitry Osipenko             trace_virtio_gpu_dec_inflight_fences(g->inflight);
10229d9e1521SGerd Hoffmann         }
10239d9e1521SGerd Hoffmann     }
10249d9e1521SGerd Hoffmann }
10259d9e1521SGerd Hoffmann 
10269d9e1521SGerd Hoffmann static virgl_renderer_gl_context
10279d9e1521SGerd Hoffmann virgl_create_context(void *opaque, int scanout_idx,
10289d9e1521SGerd Hoffmann                      struct virgl_renderer_gl_ctx_param *params)
10299d9e1521SGerd Hoffmann {
10309d9e1521SGerd Hoffmann     VirtIOGPU *g = opaque;
10319d9e1521SGerd Hoffmann     QEMUGLContext ctx;
10329d9e1521SGerd Hoffmann     QEMUGLParams qparams;
10339d9e1521SGerd Hoffmann 
10349d9e1521SGerd Hoffmann     qparams.major_ver = params->major_ver;
10359d9e1521SGerd Hoffmann     qparams.minor_ver = params->minor_ver;
10369d9e1521SGerd Hoffmann 
103750d8e25eSMarc-André Lureau     ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
10389d9e1521SGerd Hoffmann     return (virgl_renderer_gl_context)ctx;
10399d9e1521SGerd Hoffmann }
10409d9e1521SGerd Hoffmann 
10419d9e1521SGerd Hoffmann static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
10429d9e1521SGerd Hoffmann {
10439d9e1521SGerd Hoffmann     VirtIOGPU *g = opaque;
10449d9e1521SGerd Hoffmann     QEMUGLContext qctx = (QEMUGLContext)ctx;
10459d9e1521SGerd Hoffmann 
104650d8e25eSMarc-André Lureau     dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
10479d9e1521SGerd Hoffmann }
10489d9e1521SGerd Hoffmann 
10499d9e1521SGerd Hoffmann static int virgl_make_context_current(void *opaque, int scanout_idx,
10509d9e1521SGerd Hoffmann                                       virgl_renderer_gl_context ctx)
10519d9e1521SGerd Hoffmann {
10529d9e1521SGerd Hoffmann     VirtIOGPU *g = opaque;
10539d9e1521SGerd Hoffmann     QEMUGLContext qctx = (QEMUGLContext)ctx;
10549d9e1521SGerd Hoffmann 
105550d8e25eSMarc-André Lureau     return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
105650d8e25eSMarc-André Lureau                                    qctx);
10579d9e1521SGerd Hoffmann }
10589d9e1521SGerd Hoffmann 
10599d9e1521SGerd Hoffmann static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
10609d9e1521SGerd Hoffmann     .version             = 1,
10619d9e1521SGerd Hoffmann     .write_fence         = virgl_write_fence,
10629d9e1521SGerd Hoffmann     .create_gl_context   = virgl_create_context,
10639d9e1521SGerd Hoffmann     .destroy_gl_context  = virgl_destroy_context,
10649d9e1521SGerd Hoffmann     .make_current        = virgl_make_context_current,
10659d9e1521SGerd Hoffmann };
10669d9e1521SGerd Hoffmann 
10679d9e1521SGerd Hoffmann static void virtio_gpu_print_stats(void *opaque)
10689d9e1521SGerd Hoffmann {
10699d9e1521SGerd Hoffmann     VirtIOGPU *g = opaque;
1070a0a8f47fSDmitry Osipenko     VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
10719d9e1521SGerd Hoffmann 
10729d9e1521SGerd Hoffmann     if (g->stats.requests) {
10739d9e1521SGerd Hoffmann         fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
10749d9e1521SGerd Hoffmann                 g->stats.requests,
10759d9e1521SGerd Hoffmann                 g->stats.max_inflight,
10769d9e1521SGerd Hoffmann                 g->stats.req_3d,
10779d9e1521SGerd Hoffmann                 g->stats.bytes_3d);
10789d9e1521SGerd Hoffmann         g->stats.requests     = 0;
10799d9e1521SGerd Hoffmann         g->stats.max_inflight = 0;
10809d9e1521SGerd Hoffmann         g->stats.req_3d       = 0;
10819d9e1521SGerd Hoffmann         g->stats.bytes_3d     = 0;
10829d9e1521SGerd Hoffmann     } else {
10839d9e1521SGerd Hoffmann         fprintf(stderr, "stats: idle\r");
10849d9e1521SGerd Hoffmann     }
1085a0a8f47fSDmitry Osipenko     timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
10869d9e1521SGerd Hoffmann }
10879d9e1521SGerd Hoffmann 
10889d9e1521SGerd Hoffmann static void virtio_gpu_fence_poll(void *opaque)
10899d9e1521SGerd Hoffmann {
10909d9e1521SGerd Hoffmann     VirtIOGPU *g = opaque;
1091a723d2eaSDmitry Osipenko     VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
10929d9e1521SGerd Hoffmann 
10939d9e1521SGerd Hoffmann     virgl_renderer_poll();
10940c55a1cfSGerd Hoffmann     virtio_gpu_process_cmdq(g);
10950c55a1cfSGerd Hoffmann     if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
1096a723d2eaSDmitry Osipenko         timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
10979d9e1521SGerd Hoffmann     }
10989d9e1521SGerd Hoffmann }
10999d9e1521SGerd Hoffmann 
/* Public entry point: kick one fence-poll cycle (which re-arms itself
 * while work remains). */
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}
11049d9e1521SGerd Hoffmann 
11058a13b9bcSMarc-André Lureau void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
11069d9e1521SGerd Hoffmann {
11079d9e1521SGerd Hoffmann     int i;
11089d9e1521SGerd Hoffmann 
110950d8e25eSMarc-André Lureau     for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
111050d8e25eSMarc-André Lureau         dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
111150d8e25eSMarc-André Lureau         dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
11129d9e1521SGerd Hoffmann     }
11139d9e1521SGerd Hoffmann }
11149d9e1521SGerd Hoffmann 
/* Device reset: delegate to virglrenderer to drop all renderer state. */
void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}
11198a13b9bcSMarc-André Lureau 
/*
 * One-time virglrenderer setup for the device.
 *
 * Patches the callback table (EGL display getter, D3D11 texture sharing)
 * before handing it to virgl_renderer_init(), then creates the fence-poll
 * timer, the optional stats timer, and (virgl >= 1.0) the bottom half used
 * to resume a suspended command queue.
 *
 * Returns 0 on success, or the negative virgl_renderer_init() error code.
 */
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    /* Callbacks v4 lets virgl reuse QEMU's EGL display instead of its own. */
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    /* On ANGLE/D3D11 hosts, share textures through D3D11 handles. */
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    /* Armed on demand; polls fences while commands are in flight. */
    gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                  virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       virtio_gpu_print_stats, g);
        timer_mod(gl->print_stats,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

#if VIRGL_VERSION_MAJOR >= 1
    /* BH that restarts cmdq processing once a blocking operation finishes. */
    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                    virtio_gpu_virgl_resume_cmdq_bh,
                                    g);
#endif

    return 0;
}
11629d9e1521SGerd Hoffmann 
11635643cc94SDave Airlie int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
11645643cc94SDave Airlie {
11655643cc94SDave Airlie     uint32_t capset2_max_ver, capset2_max_size;
11665643cc94SDave Airlie     virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
11675643cc94SDave Airlie                               &capset2_max_ver,
11685643cc94SDave Airlie                               &capset2_max_size);
11695643cc94SDave Airlie 
11705643cc94SDave Airlie     return capset2_max_ver ? 2 : 1;
11715643cc94SDave Airlie }
1172