xref: /qemu/hw/display/virtio-gpu-udmabuf.c (revision ca61e75071c647cf93b3161a228c6a54178cd58c)
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "trace.h"
#include "exec/ramblock.h"
#include "sysemu/hostmem.h"
#include <sys/ioctl.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include "qemu/memfd.h"
#include "standard-headers/linux/udmabuf.h"

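/*
 * Turn the resource's scattered guest pages into a single dma-buf: each iov
 * entry is resolved back to its backing RAMBlock (which must be fd-backed),
 * collected into a udmabuf_create_list and submitted to the udmabuf device
 * in one UDMABUF_CREATE_LIST ioctl.  On success res->dmabuf_fd holds the new
 * dma-buf fd; on any failure the function returns without creating one.
 */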
static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
{
    struct udmabuf_create_list *list;
    RAMBlock *rb;
    ram_addr_t offset;
    int udmabuf, i;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return;
    }

    list = g_malloc0(sizeof(struct udmabuf_create_list) +
                     sizeof(struct udmabuf_create_item) * res->iov_cnt);

    for (i = 0; i < res->iov_cnt; i++) {
        rcu_read_lock();
        rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
        rcu_read_unlock();

        if (!rb || rb->fd < 0) {
            g_free(list);
            return;
        }

        list->list[i].memfd  = rb->fd;
        list->list[i].offset = offset;
        list->list[i].size   = res->iov[i].iov_len;
    }

    list->count = res->iov_cnt;
    list->flags = UDMABUF_FLAGS_CLOEXEC;

    res->dmabuf_fd = ioctl(udmabuf, UDMABUF_CREATE_LIST, list);
    if (res->dmabuf_fd < 0) {
        warn_report("%s: UDMABUF_CREATE_LIST: %s", __func__,
                    strerror(errno));
    }
    g_free(list);
}

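/*
 * Map the dma-buf read-only into QEMU's address space so the resource's
 * otherwise scattered guest pages can be accessed as one linear blob.
 * res->remapped is left NULL if the mapping fails.
 */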
static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res)
{
    res->remapped = mmap(NULL, res->blob_size, PROT_READ,
                         MAP_SHARED, res->dmabuf_fd, 0);
    if (res->remapped == MAP_FAILED) {
        warn_report("%s: dmabuf mmap failed: %s", __func__,
                    strerror(errno));
        res->remapped = NULL;
    }
}

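/* Tear down the linear mapping and close the dma-buf fd, if either exists. */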
static void virtio_gpu_destroy_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        munmap(res->remapped, res->blob_size);
        res->remapped = NULL;
    }
    if (res->dmabuf_fd >= 0) {
        close(res->dmabuf_fd);
        res->dmabuf_fd = -1;
    }
}

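/*
 * object_child_foreach() callback: set *opaque to true if this child is a
 * memory backend whose RAMBlock sits on an fd that supports F_GET_SEALS and
 * has at least one seal set, i.e. looks like a memfd.  Always returns 0 so
 * the iteration visits every child.
 */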
static int find_memory_backend_type(Object *obj, void *opaque)
{
    bool *memfd_backend = opaque;
    int ret;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        RAMBlock *rb = backend->mr.ram_block;

        if (rb && rb->fd > 0) {
            ret = fcntl(rb->fd, F_GET_SEALS);
            if (ret > 0) {
                *memfd_backend = true;
            }
        }
    }

    return 0;
}

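/*
 * Blob resources can only be wrapped in udmabufs when the host exposes a
 * usable udmabuf device and guest RAM comes from a memfd-style backend;
 * check both before advertising support.
 */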
bool virtio_gpu_have_udmabuf(void)
{
    Object *memdev_root;
    int udmabuf;
    bool memfd_backend = false;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return false;
    }

    memdev_root = object_resolve_path("/objects", NULL);
    object_child_foreach(memdev_root, find_memory_backend_type, &memfd_backend);

    return memfd_backend;
}

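/*
 * Give the blob resource a linear host view: a single-entry iov is already
 * contiguous and is used directly, while a multi-entry iov is wrapped in a
 * udmabuf and remapped.  If either step fails the function returns early
 * without setting res->blob.
 */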
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res)
{
    void *pdata = NULL;

    res->dmabuf_fd = -1;
    if (res->iov_cnt == 1) {
        pdata = res->iov[0].iov_base;
    } else {
        virtio_gpu_create_udmabuf(res);
        if (res->dmabuf_fd < 0) {
            return;
        }
        virtio_gpu_remap_udmabuf(res);
        if (!res->remapped) {
            return;
        }
        pdata = res->remapped;
    }

    res->blob = pdata;
}

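/* Undo virtio_gpu_init_udmabuf() for resources that were actually remapped. */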
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        virtio_gpu_destroy_udmabuf(res);
    }
}

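/*
 * Let the display release its reference to the dma-buf, then drop the
 * buffer from the device's bookkeeping list and free it.
 */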
static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
{
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[dmabuf->scanout_id];
    dpy_gl_release_dmabuf(scanout->con, &dmabuf->buf);
    QTAILQ_REMOVE(&g->dmabuf.bufs, dmabuf, next);
    g_free(dmabuf);
}

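/*
 * Describe the resource's dma-buf for the display: fill a VGPUDMABuf with
 * the framebuffer layout, the scanout rectangle and the DRM fourcc derived
 * from the pixman format, then queue it on the device's dmabuf list.
 * Returns NULL if the resource has no dma-buf fd.
 */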
static VGPUDMABuf
*virtio_gpu_create_dmabuf(VirtIOGPU *g,
                          uint32_t scanout_id,
                          struct virtio_gpu_simple_resource *res,
                          struct virtio_gpu_framebuffer *fb,
                          struct virtio_gpu_rect *r)
{
    VGPUDMABuf *dmabuf;

    if (res->dmabuf_fd < 0) {
        return NULL;
    }

    dmabuf = g_new0(VGPUDMABuf, 1);
    dmabuf->buf.width = fb->width;
    dmabuf->buf.height = fb->height;
    dmabuf->buf.stride = fb->stride;
    dmabuf->buf.x = r->x;
    dmabuf->buf.y = r->y;
    dmabuf->buf.scanout_width = r->width;
    dmabuf->buf.scanout_height = r->height;
    dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
    dmabuf->buf.fd = res->dmabuf_fd;
    dmabuf->buf.allow_fences = true;
    dmabuf->buf.draw_submitted = false;
    dmabuf->scanout_id = scanout_id;
    QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);

    return dmabuf;
}

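/*
 * Point the given scanout at a fresh dmabuf for this resource: resize the
 * console to the scanout rectangle, hand the new buffer to the display and
 * only then release the previous primary buffer.  Returns -EINVAL if the
 * resource has no dma-buf to scan out.
 */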
int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                             uint32_t scanout_id,
                             struct virtio_gpu_simple_resource *res,
                             struct virtio_gpu_framebuffer *fb,
                             struct virtio_gpu_rect *r)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    VGPUDMABuf *new_primary, *old_primary = NULL;

    new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r);
    if (!new_primary) {
        return -EINVAL;
    }

    if (g->dmabuf.primary[scanout_id]) {
        old_primary = g->dmabuf.primary[scanout_id];
    }

    g->dmabuf.primary[scanout_id] = new_primary;
    qemu_console_resize(scanout->con,
                        new_primary->buf.scanout_width,
                        new_primary->buf.scanout_height);
    dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);

    if (old_primary) {
        virtio_gpu_free_dmabuf(g, old_primary);
    }

    return 0;
}