/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

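/*
 * Most commands and responses fit into a small inline area that lives
 * right behind struct virtio_gpu_vbuffer in the same slab object, so no
 * separate allocation is needed for them.
 */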
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

/* For drm_panic */
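/*
 * Panic-path variant: runs from the panic handler, so it must not sleep.
 * The allocation therefore uses GFP_ATOMIC and no response callback is set.
 */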
static struct virtio_gpu_vbuffer*
virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);

	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;
	vbuf->resp_cb = NULL;
	vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
	vbuf->resp_buf = (void *)vbuf->buf + size;
	return vbuf;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

/* For drm_panic */
static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_vbuffer **vbuffer_p,
					     int cmd_size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

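/*
 * Collect all completed buffers from the virtqueue onto @reclaim_list.
 * Called with the queue lock held; the buffers are processed and freed
 * later, after the lock has been dropped.
 */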
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		struct virtio_gpu_ctrl_hdr *resp =
			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
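/*
 * vmalloc memory is only virtually contiguous, so every page is mapped
 * through its own scatterlist entry.
 */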
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

/* For drm_panic */
static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf,
					   int elemcnt,
					   struct scatterlist **sgs,
					   int outcnt,
					   int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret;

	if (vgdev->has_indirect)
		elemcnt = 1;

	if (vq->num_free < elemcnt)
		return -ENOMEM;

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	return 0;
}

static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

/* For drm_panic */
static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct scatterlist *sgs[3], vcmd, vresp;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
					       elemcnt, sgs,
					       outcnt, incnt);
}

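/*
 * Lay out a control request as up to three scatterlist elements: the
 * command itself (device-readable), optional extra payload data
 * (device-readable), and an optional response buffer (device-writable).
 */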
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

/* For drm_panic */
void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

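/*
 * Queueing a command only marks it pending; virtio_gpu_notify() kicks the
 * host once for everything queued since the last notification, so callers
 * can batch several commands per kick.
 */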
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		vbuf->seqno = ++vgdev->cursorq.seqno;
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf),
					   vbuf->seqno);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

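/*
 * The object backing the resource is released from the response callback,
 * i.e. only after the host has processed the unref; if the command cannot
 * be queued at all, clean up immediately instead.
 */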
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

/* For drm_panic */
void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
					 uint32_t resource_id,
					 uint32_t x, uint32_t y,
					 uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = NULL;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

/* For drm_panic */
int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					     uint64_t offset,
					     uint32_t width, uint32_t height,
					     uint32_t x, uint32_t y,
					     struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	const struct drm_edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
	drm_edid_connector_update(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->drm_edid;
	output->drm_edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	drm_edid_free(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

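/*
 * Attach/detach only issue a command when the backing state actually
 * changes; obj->attached tracks whether the host currently knows about
 * the object's backing pages.
 */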
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	if (obj->attached)
		return;

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);

	obj->attached = true;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_fence *fence)
{
	if (!obj->attached)
		return;

	virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
					       fence);

	obj->attached = false;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;

	if (nents)
		bo->attached = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}