// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
#include "vmwgfx_cursor_plane.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"
#include "vmwgfx_resource_priv.h"
#include "vmw_surface_cache.h"

#include "drm/drm_atomic.h"
#include "drm/drm_atomic_helper.h"
#include "drm/drm_plane.h"
#include <asm/page.h>

#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
#define VMW_CURSOR_SNOOP_WIDTH 64
#define VMW_CURSOR_SNOOP_HEIGHT 64

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * without clogging other fifocmd users, so we treat
	 * reservations separately from other fallible KMS-atomic
	 * resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

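/**
 * vmw_cursor_plane_update_legacy - redefine the legacy cursor from the snooper
 * @vmw: the private driver struct
 * @vps: state of the cursor plane
 *
 * Sends a define-cursor command with the surface's snooped image, but only
 * when the snooper contents changed since the cursor was last defined.
 */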
static void
vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
			       struct vmw_plane_state *vps)
{
	struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
	s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
	s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;

	if (WARN_ON(!surface || !surface->snooper.image))
		return;

	if (vps->cursor.legacy.id != surface->snooper.id) {
		vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
					   vps->base.crtc_w, vps->base.crtc_h,
					   hotspot_x, hotspot_y);
		vps->cursor.legacy.id = surface->snooper.id;
	}
}

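/**
 * vmw_cursor_update_type - pick how the cursor should be updated
 * @vmw: the private driver struct
 * @vps: state of the cursor plane
 *
 * Uses the legacy (snooped surface) path when a snooper image is present,
 * the cursor MOB path when the device supports it, and no update otherwise.
 */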
static enum vmw_cursor_update_type
vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
{
	struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);

	if (surface && surface->snooper.image)
		return VMW_CURSOR_UPDATE_LEGACY;

	if (vmw->has_mob) {
		if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
			return VMW_CURSOR_UPDATE_MOB;
	}

	return VMW_CURSOR_UPDATE_NONE;
}

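/**
 * vmw_cursor_update_mob - write the cursor image into the cursor MOB
 * @vmw: the private driver struct
 * @vps: state of the cursor plane
 *
 * Copies the cursor image into the MOB after an alpha-cursor header and
 * points SVGA_REG_CURSOR_MOBID at it, then drops both mappings.
 */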
static void vmw_cursor_update_mob(struct vmw_private *vmw,
				  struct vmw_plane_state *vps)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
	u32 *image = vmw_bo_map_and_cache(bo);
	const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.mob);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
	alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
	alpha_header->width = vps->base.crtc_w;
	alpha_header->height = vps->base.crtc_h;

	memcpy(header + 1, image, image_size);
	vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));

	vmw_bo_unmap(bo);
	vmw_bo_unmap(vps->cursor.mob);
}

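/**
 * vmw_cursor_mob_size - size in bytes of the MOB backing a cursor
 * @update_type: type of the cursor update
 * @w: width of the cursor image
 * @h: height of the cursor image
 *
 * Returns the image size plus header for MOB updates and zero for update
 * types that need no MOB.
 */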
static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
			       u32 w, u32 h)
{
	switch (update_type) {
	case VMW_CURSOR_UPDATE_LEGACY:
	case VMW_CURSOR_UPDATE_NONE:
		return 0;
	case VMW_CURSOR_UPDATE_MOB:
		return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
	}
	return 0;
}

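/**
 * vmw_cursor_mob_destroy - unpin and unreference a cursor MOB
 * @vbo: the MOB to destroy; *@vbo is set to NULL
 */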
static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

/**
 * vmw_cursor_mob_unmap - Unmaps the cursor mob.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */

static int
vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.mob;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}

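/**
 * vmw_cursor_mob_put - return a cursor MOB to the plane's cache
 * @vcp: the cursor plane
 * @vps: state of the cursor plane
 *
 * Unmaps the MOB and parks it in a free cache slot. When the cache is full
 * the MOB replaces a smaller cached entry or is destroyed outright.
 */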
static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
			       struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.mob)
		return;

	vmw_cursor_mob_unmap(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.mob;
			vps->cursor.mob = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.mob->tbo.base.size) {
			vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.mob;
			vps->cursor.mob = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_cursor_mob_destroy(&vps->cursor.mob);
}

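/**
 * vmw_cursor_mob_get - provide a cursor MOB big enough for the cursor
 * @vcp: the cursor plane
 * @vps: state of the cursor plane
 *
 * Reuses the currently held or a cached MOB when one of sufficient size is
 * available; otherwise creates a new one and fences its creation.
 *
 * Returns 0 on success, a negative error code otherwise.
 */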
static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
			      struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
	u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
				       vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.mob) {
		if (vps->cursor.mob->tbo.base.size >= size)
			return 0;
		vmw_cursor_mob_put(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.mob = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
					 &vps->cursor.mob);

	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (ret != 0) {
		ttm_bo_unreserve(&vps->cursor.mob->tbo);
		goto teardown;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&vps->cursor.mob->tbo);

	return 0;

teardown:
	vmw_cursor_mob_destroy(&vps->cursor.mob);
	return ret;
}

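/**
 * vmw_cursor_update_position - program cursor position and visibility
 * @dev_priv: the private driver struct
 * @show: whether the cursor should be visible
 * @x: horizontal position of the cursor
 * @y: vertical position of the cursor
 *
 * Writes the CURSOR4 registers, the cursor bypass 3 FIFO registers or the
 * basic cursor registers, depending on what the device supports.
 */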
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					: SVGA_CURSOR_ON_HIDE;
	u32 count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

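/**
 * vmw_kms_cursor_snoop - snoop a cursor image out of a surface DMA command
 * @srf: the surface the DMA command targets
 * @tfile: the object file of the caller
 * @bo: the guest backing buffer of the DMA
 * @header: header of the SVGA3D surface DMA command
 *
 * Checks that the DMA describes a single page-aligned copy no larger than
 * the snoop size, then copies the image into @srf's snooper and bumps the
 * snooper id so the cursor gets redefined on the next update.
 */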
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	u32 box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
		    sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}
	srf->snooper.id++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

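/**
 * vmw_cursor_plane_destroy - hide the cursor and release plane resources
 * @plane: cursor plane to destroy
 *
 * Hides the cursor, destroys all cached cursor MOBs and cleans up the plane.
 */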
void vmw_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}

/**
 * vmw_cursor_mob_map - Maps the cursor mob.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */

static int
vmw_cursor_mob_map(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
				       vps->base.crtc_w, vps->base.crtc_h);
	struct vmw_bo *vbo = vps->cursor.mob;

	if (!vbo)
		return -EINVAL;

	if (vbo->tbo.base.size < size)
		return -EINVAL;

	if (vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vbo);

	ttm_bo_unreserve(&vbo->tbo);

	return 0;
}

/**
 * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	if (!vmw_user_object_is_null(&vps->uo))
		vmw_user_object_unmap(&vps->uo);

	vmw_cursor_mob_unmap(vps);
	vmw_cursor_mob_put(vcp, vps);

	vmw_du_plane_unpin_surf(vps);
	vmw_user_object_unref(&vps->uo);
}

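/**
 * vmw_cursor_buffer_changed - check whether the cursor content changed
 * @new_vps: the new state of the cursor plane
 * @old_vps: the previous state of the cursor plane
 *
 * Returns true if the backing buffer changed or was dirtied since the old
 * state, false otherwise.
 */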
static bool
vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
			  struct vmw_plane_state *old_vps)
{
	struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
	struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
	struct vmw_surface *surf;
	bool dirty = false;
	int ret;

	if (new_bo != old_bo)
		return true;

	if (new_bo) {
		if (!old_bo) {
			return true;
		} else if (new_bo->dirty) {
			vmw_bo_dirty_scan(new_bo);
			dirty = vmw_bo_is_dirty(new_bo);
			if (dirty) {
				surf = vmw_user_object_surface(&new_vps->uo);
				if (surf)
					vmw_bo_dirty_transfer_to_res(&surf->res);
				else
					vmw_bo_dirty_clear(new_bo);
			}
			return dirty;
		} else if (new_bo != old_bo) {
			/*
			 * Currently unused because the check at the top
			 * exits right away. In most cases a different
			 * buffer means different contents. For the few
			 * percent of cases where that's not true, the cost
			 * of running the memcmp on every other update seems
			 * to outweigh the benefits. Keep the branch so it
			 * can be trivially validated by removing the
			 * initial if (new_bo != old_bo) at the start.
			 */
			void *old_image;
			void *new_image;
			bool changed = false;
			struct ww_acquire_ctx ctx;
			const u32 size = new_vps->base.crtc_w *
					 new_vps->base.crtc_h * sizeof(u32);

			ww_acquire_init(&ctx, &reservation_ww_class);

			ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
			if (ret != 0) {
				ww_acquire_fini(&ctx);
				return true;
			}

			ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
			if (ret != 0) {
				ttm_bo_unreserve(&old_bo->tbo);
				ww_acquire_fini(&ctx);
				return true;
			}

			old_image = vmw_bo_map_and_cache(old_bo);
			new_image = vmw_bo_map_and_cache(new_bo);

			if (old_image && new_image && old_image != new_image)
				changed = memcmp(old_image, new_image, size) != 0;

			ttm_bo_unreserve(&new_bo->tbo);
			ttm_bo_unreserve(&old_bo->tbo);

			ww_acquire_fini(&ctx);

			return changed;
		}
		return false;
	}

	return false;
}

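/**
 * vmw_cursor_plane_changed - check whether the cursor plane state changed
 * @new_vps: the new state of the cursor plane
 * @old_vps: the previous state of the cursor plane
 *
 * Returns true if the size, a hotspot or the framebuffer differs between
 * the two states, false otherwise.
 */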
static bool
vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
			 struct vmw_plane_state *old_vps)
{
	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
	    old_vps->base.hotspot_y != new_vps->base.hotspot_y)
		return true;

	if (old_vps->cursor.legacy.hotspot_x !=
	    new_vps->cursor.legacy.hotspot_x ||
	    old_vps->cursor.legacy.hotspot_y !=
	    new_vps->cursor.legacy.hotspot_y)
		return true;

	if (old_vps->base.fb != new_vps->base.fb)
		return true;

	return false;
}

/**
 * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
	struct vmw_private *vmw = vmw_priv(plane->dev);
	struct vmw_bo *bo = NULL;
	struct vmw_surface *surface;
	int ret = 0;

	if (!vmw_user_object_is_null(&vps->uo)) {
		vmw_user_object_unmap(&vps->uo);
		vmw_user_object_unref(&vps->uo);
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
			vps->uo.surface = NULL;
		} else {
			memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
		}
		vmw_user_object_ref(&vps->uo);
	}

	vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
	switch (vps->cursor.update_type) {
	case VMW_CURSOR_UPDATE_LEGACY:
		surface = vmw_user_object_surface(&vps->uo);
		if (!surface || vps->cursor.legacy.id == surface->snooper.id)
			vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
		break;
	case VMW_CURSOR_UPDATE_MOB: {
		bo = vmw_user_object_buffer(&vps->uo);
		if (bo) {
			struct ttm_operation_ctx ctx = { false, false };

			ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
			if (ret != 0)
				return -ENOMEM;

			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret != 0) {
				/* Drop the reservation taken above. */
				ttm_bo_unreserve(&bo->tbo);
				return -ENOMEM;
			}

			/*
			 * vmw_bo_pin_reserved also validates, so to skip
			 * the extra validation use ttm_bo_pin directly
			 */
			if (!bo->tbo.pin_count)
				ttm_bo_pin(&bo->tbo);

			if (vmw_framebuffer_to_vfb(fb)->bo) {
				const u32 size = new_state->crtc_w *
						 new_state->crtc_h *
						 sizeof(u32);

				(void)vmw_bo_map_and_cache_size(bo, size);
			} else {
				vmw_bo_map_and_cache(bo);
			}
			ttm_bo_unreserve(&bo->tbo);
		}
		if (!vmw_user_object_is_null(&vps->uo)) {
			if (!vmw_cursor_plane_changed(vps, old_vps) &&
			    !vmw_cursor_buffer_changed(vps, old_vps)) {
				vps->cursor.update_type =
					VMW_CURSOR_UPDATE_NONE;
			} else {
				vmw_cursor_mob_get(vcp, vps);
				vmw_cursor_mob_map(vps);
			}
		}
	}
		break;
	case VMW_CURSOR_UPDATE_NONE:
		/* do nothing */
		break;
	}

	return 0;
}

/**
 * vmw_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct vmw_private *vmw = vmw_priv(plane->dev);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	enum vmw_cursor_update_type update_type;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING, true,
						  true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	update_type = vmw_cursor_update_type(vmw, vps);
	if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
		if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
		    new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
			drm_warn(&vmw->drm,
				 "Invalid cursor dimensions (%d, %d)\n",
				 new_state->crtc_w, new_state->crtc_h);
			return -EINVAL;
		}
		surface = vmw_user_object_surface(&vps->uo);
		if (!surface || !surface->snooper.image) {
			drm_warn(&vmw->drm,
				 "surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}

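/**
 * vmw_cursor_plane_atomic_update - apply the new cursor state to the device
 * @plane: cursor plane
 * @state: atomic state containing the new plane state
 *
 * Redefines the cursor image according to the precomputed update type and
 * reprograms the cursor position, or hides the cursor when no buffer object
 * is bound.
 */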
void
vmw_cursor_plane_atomic_update(struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(plane->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	s32 hotspot_x, hotspot_y, cursor_x, cursor_y;

	/*
	 * Hide the cursor if the new bo is null
	 */
	if (vmw_user_object_is_null(&vps->uo)) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	switch (vps->cursor.update_type) {
	case VMW_CURSOR_UPDATE_LEGACY:
		vmw_cursor_plane_update_legacy(dev_priv, vps);
		break;
	case VMW_CURSOR_UPDATE_MOB:
		vmw_cursor_update_mob(dev_priv, vps);
		break;
	case VMW_CURSOR_UPDATE_NONE:
		/* do nothing */
		break;
	}

	/*
	 * For all update types update the cursor position
	 */
	cursor_x = new_state->crtc_x + du->set_gui_x;
	cursor_y = new_state->crtc_y + du->set_gui_y;

	hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
	hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;

	vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
				   cursor_y + hotspot_y);
}

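/**
 * vmw_kms_cursor_bypass_ioctl - set legacy cursor hotspots from userspace
 * @dev: the drm device
 * @data: a struct drm_vmw_cursor_bypass_arg
 * @file_priv: the calling file
 *
 * Stores the requested hotspot in the cursor plane state of one CRTC, or of
 * all CRTCs when DRM_VMW_CURSOR_BYPASS_ALL is set.
 *
 * Returns 0 on success, -ENOENT if the CRTC cannot be found.
 */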
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct vmw_plane_state *vps;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			vps = vmw_plane_state_to_vps(du->cursor.base.state);
			vps->cursor.legacy.hotspot_x = arg->xhot;
			vps->cursor.legacy.hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);
	vps = vmw_plane_state_to_vps(du->cursor.base.state);
	vps->cursor.legacy.hotspot_x = arg->xhot;
	vps->cursor.legacy.hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

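/**
 * vmw_cursor_snooper_create - allocate a snooper image for a cursor surface
 * @file_priv: the calling file
 * @metadata: metadata of the surface being created
 *
 * Only non-atomic clients creating a scanout surface with the exact legacy
 * cursor size and format get a snooper image.
 *
 * Returns the zeroed image buffer, NULL when the surface does not qualify,
 * or an ERR_PTR on allocation failure.
 */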
void *vmw_cursor_snooper_create(struct drm_file *file_priv,
				struct vmw_surface_metadata *metadata)
{
	if (!file_priv->atomic && metadata->scanout &&
	    metadata->num_sizes == 1 &&
	    metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
	    metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
	    metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
		const struct SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
		const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
					      VMW_CURSOR_SNOOP_HEIGHT *
					      desc->pitchBytesPerBlock;
		void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);

		if (!image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			return ERR_PTR(-ENOMEM);
		}
		return image;
	}
	return NULL;
}