/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#include "ui/egl-helpers.h"

#include <virglrenderer.h>

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif

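/*
 * 2D resources are also backed by virglrenderer: the guest-supplied
 * format and size are forwarded, while target and bind are hardcoded so
 * the resource ends up as a 2D render-target texture (the values appear
 * to match the Gallium PIPE_TEXTURE_2D target and render-target bind
 * flag used by virgl).
 */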
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

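/*
 * Dropping a resource: detach any guest backing pages first so their
 * mappings can be released, then let virglrenderer destroy the resource.
 */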
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

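/*
 * Associate a resource with a scanout (display output).  The resource's
 * backing GL texture is queried from virglrenderer and handed to the
 * display layer; a resource_id of 0 (or an empty rectangle) disables the
 * scanout instead.
 */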
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#ifdef HAVE_VIRGL_D3D_INFO_EXT
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

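/*
 * Execute a guest command stream: the buffer is copied out of the
 * request's iovec and passed to virglrenderer, which expects the size as
 * a count of 32-bit words (hence cs.size / 4).
 */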
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

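/*
 * Attach guest memory as backing store: the scatter/gather entries
 * following the request header are mapped into an iovec and handed to
 * virglrenderer; if the renderer rejects them, the mapping is torn down
 * again.
 */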
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

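/*
 * Only two capability sets are advertised, addressed by index: index 0 is
 * VIRGL, index 1 is VIRGL2.  Anything else yields an empty (zero-sized)
 * capset in the response.
 */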
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

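/*
 * Main dispatcher for control queue commands when virgl is enabled.
 * After a handler runs, a response is sent unless the handler already
 * finished the command: errors are reported back, unfenced commands get
 * OK_NODATA right away, and fenced commands are answered later from
 * virgl_write_fence() once the renderer signals the fence.
 */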
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

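/*
 * Called back by virglrenderer once the GL work up to the given fence id
 * has completed.  All queued commands with a fence id not newer than
 * 'fence' are retired and answered with OK_NODATA.
 */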
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

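/*
 * Periodic poll: lets virglrenderer check its fences and reprocesses the
 * command queue; the timer keeps re-arming (every 10 ms) as long as
 * commands or fences are still outstanding.
 */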
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}

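/*
 * One-time renderer setup.  Newer virglrenderer callback versions let us
 * hand over the EGL display, and the D3D11 share-texture flag is
 * requested when QEMU detected an ANGLE-on-D3D EGL setup
 * (qemu_egl_angle_d3d); both are compile-time optional.
 */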
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

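/*
 * Report how many capability sets the device exposes: probe VIRGL2 and
 * advertise two capsets if it is supported, otherwise just the base
 * VIRGL capset.
 */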
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}