/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
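    /* a short read means the reply stream can no longer be trusted; drop the socket */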
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
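    /*
     * Wrap the newly allocated buffer in a pixman image so the 2d
     * transfer and flush paths can address it by pixel format.
     */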
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

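    /*
     * Map the guest-provided backing entries to host addresses; the
     * resulting iovec is what later TRANSFER_TO_HOST_2D commands read from.
     */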
    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
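            /*
             * dmabuf path: the display side already shares the buffer, so
             * only the damaged rectangle is announced, then wait for the
             * DMABUF_UPDATE ack before processing more commands.
             */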
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *i =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, i,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(i);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    /*     break */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    g_debug("%s move:%d\n", G_STRFUNC, move);

    if (move) {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        vg_send_msg(g, &msg, -1);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
    }
}

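/*
 * Cursor queue handler: pops virtio_gpu_update_cursor requests and
 * forwards them to the display side as cursor position/update messages.
 */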
static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return G_SOURCE_CONTINUE;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

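    /* release any resources the guest left behind */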
static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};
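
/*
 * Illustrative invocations (socket path, fd number and render node are
 * only examples, not mandated values):
 *
 *   vhost-user-gpu --socket-path=/tmp/vgpu.sock --virgl
 *   vhost-user-gpu --fd=3 --render-node=/dev/dri/renderD128
 *
 * Exactly one of --socket-path or --fd must be given: with --socket-path
 * the process listens and accepts a single vhost-user connection, while
 * with --fd it reuses an already-connected, inherited descriptor.
 */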
path"); 1192d52c454aSMarc-André Lureau } 1193d52c454aSMarc-André Lureau } 1194d52c454aSMarc-André Lureau 1195d52c454aSMarc-André Lureau if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) { 1196d52c454aSMarc-André Lureau g_printerr("Please specify either --fd or --socket-path\n"); 1197d52c454aSMarc-André Lureau exit(EXIT_FAILURE); 1198d52c454aSMarc-André Lureau } 1199d52c454aSMarc-André Lureau 1200d52c454aSMarc-André Lureau if (opt_socket_path) { 1201d52c454aSMarc-André Lureau int lsock = unix_listen(opt_socket_path, &error_fatal); 120224af03b9SMarc-André Lureau if (lsock < 0) { 120324af03b9SMarc-André Lureau g_printerr("Failed to listen on %s.\n", opt_socket_path); 120424af03b9SMarc-André Lureau exit(EXIT_FAILURE); 120524af03b9SMarc-André Lureau } 1206d52c454aSMarc-André Lureau fd = accept(lsock, NULL, NULL); 1207d52c454aSMarc-André Lureau close(lsock); 1208d52c454aSMarc-André Lureau } else { 1209d52c454aSMarc-André Lureau fd = opt_fdnum; 1210d52c454aSMarc-André Lureau } 1211d52c454aSMarc-André Lureau if (fd == -1) { 1212f55411cfSMarc-André Lureau g_printerr("Invalid vhost-user socket.\n"); 1213d52c454aSMarc-André Lureau exit(EXIT_FAILURE); 1214d52c454aSMarc-André Lureau } 1215d52c454aSMarc-André Lureau 12166f5fd837SStefan Hajnoczi if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) { 12176f5fd837SStefan Hajnoczi g_printerr("Failed to initialize libvhost-user-glib.\n"); 12186f5fd837SStefan Hajnoczi exit(EXIT_FAILURE); 12196f5fd837SStefan Hajnoczi } 1220d52c454aSMarc-André Lureau 1221d52c454aSMarc-André Lureau loop = g_main_loop_new(NULL, FALSE); 1222d52c454aSMarc-André Lureau g_main_loop_run(loop); 1223d52c454aSMarc-André Lureau g_main_loop_unref(loop); 1224d52c454aSMarc-André Lureau 1225d52c454aSMarc-André Lureau vg_destroy(&g); 1226d52c454aSMarc-André Lureau if (g.drm_rnode_fd >= 0) { 1227d52c454aSMarc-André Lureau close(g.drm_rnode_fd); 1228d52c454aSMarc-André Lureau } 1229d52c454aSMarc-André Lureau 1230d52c454aSMarc-André Lureau return 0; 1231d52c454aSMarc-André Lureau } 1232