// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>

void vmw_du_init(struct vmw_display_unit *du)
{
	vmw_vkms_crtc_init(&du->crtc);
}

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	vmw_vkms_crtc_cleanup(&du->crtc);
	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
{
	struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);

	if (surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&surf->res);
			vps->pinned--;
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps);
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about the crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;
	int ret;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_fb != new_fb)
		new_state->ignore_damage_clips = true;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);
	return ret;
}

int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/*
	 * This is fine in general, but broken userspace might expect
	 * some actual rendering, so give a clue as to why it's blank.
	 */
	if (new_state->enable && !has_primary)
		drm_dbg_driver(&vmw->drm,
			       "CRTC without a primary plane will be blank.\n");


	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
	vmw_vkms_crtc_atomic_begin(crtc, state);
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}
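
/*
 * Illustrative sketch (not driver code): the duplicate/reset/destroy hooks
 * in this file rely on the vmw state structs embedding the DRM core state
 * as their first member, so a kmemdup() of the base state pointer copies
 * the whole subclass, and helpers like vmw_crtc_state_to_vcs() can cast
 * back from the base pointer:
 *
 *	struct vmw_crtc_state {
 *		struct drm_crtc_state base;
 *		// vmw-specific members follow (see vmwgfx_kms.h)
 *	};
 *
 *	struct vmw_crtc_state *vcs = vmw_crtc_state_to_vcs(crtc->state);
 *
 * The same pattern is used below for the plane and connector states.
 */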


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;


	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	vps->cursor.mob = NULL;

	/* Each ref counted resource needs to be acquired again */
	vmw_user_object_ref(&vps->uo);
	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	vmw_user_object_unref(&vps->uo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;


	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
	struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);

	if (bo) {
		vmw_bo_dirty_release(bo);
		/*
		 * bo->dirty is reference counted so it being NULL
		 * means that the surface wasn't coherent to begin
		 * with and so we have to free the dirty tracker
		 * in the vmw_resource
		 */
		if (!bo->dirty && surf && surf->res.dirty)
			surf->res.func->dirty_free(&surf->res);
	}
	drm_framebuffer_cleanup(framebuffer);
	vmw_user_object_unref(&vfbs->uo);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}
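
/*
 * Illustrative sketch (not driver code): a kernel-internal readback of the
 * top-left 640x480 region of a buffer-object backed framebuffer, without
 * user-space fencing (so both @file_priv and @user_fence_rep are NULL, as
 * the kdoc above requires):
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *	int ret = vmw_kms_readback(dev_priv, NULL, vfb, NULL, &clip, 1);
 *
 * Per the dispatch above, this only succeeds on Screen Object and Screen
 * Target display units; everything else returns -ENOSYS.
 */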

static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);

	if (WARN_ON(!bo))
		return -EINVAL;
	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.create_handle = vmw_framebuffer_surface_create_handle,
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_user_object *uo,
					   struct vmw_framebuffer **out,
					   const struct drm_format_info *info,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd)

{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	struct vmw_surface *surface;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	surface = vmw_user_object_surface(uo);

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd);
	memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
	vmw_user_object_ref(&vfbs->uo);

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_user_object_unref(&vfbs->uo);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
			vmw_framebuffer_to_vfbd(fb);
	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	vmw_bo_dirty_release(vfbd->buffer);
	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_format_info *info,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)

{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces must not exceed the host's maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @uo: Pointer to user object to wrap the kms framebuffer around.
 * Either the buffer or surface inside the user object must be NULL.
 * @info: pixel format information.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_user_object *uo,
			const struct drm_format_info *info,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	int ret;

	/* Create the new framebuffer depending on what we have */
	if (vmw_user_object_surface(uo)) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
						      info, mode_cmd);
	} else if (uo->buffer) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
						 info, mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}
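
/*
 * Illustrative sketch (not driver code): a user object holds either a
 * surface or a buffer object, so the dispatch above reduces the caller
 * side to:
 *
 *	struct vmw_framebuffer *vfb =
 *		vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd);
 *	if (IS_ERR(vfb))
 *		return PTR_ERR(vfb);
 *
 * Callers must use IS_ERR()/PTR_ERR() rather than a NULL check, since the
 * function reports failure via ERR_PTR().
 */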

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_format_info *info,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_user_object uo = {0};
	struct vmw_bo *bo;
	struct vmw_surface *surface;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
				     &uo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}


	if (vmw_user_object_surface(&uo) &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}


	vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	bo = vmw_user_object_buffer(&uo);
	surface = vmw_user_object_surface(&uo);
	/* vmw_user_object_lookup takes one ref so does new_fb */
	vmw_user_object_unref(&uo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	ttm_bo_reserve(&bo->tbo, false, false, NULL);
	ret = vmw_bo_dirty_add(bo);
	if (!ret && surface && surface->res.func->dirty_alloc) {
		surface->res.coherent = true;
		ret = surface->res.func->dirty_alloc(&surface->res);
	}
	ttm_bo_unreserve(&bo->tbo);

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU, only the individual screen (screen target) size is
		 * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below, prim_bb_mem is the vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
	 * is the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
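
/*
 * Worked example (illustrative): two 1920x1080 outputs placed side by side,
 * rects[0] = (0, 0, 1920, 1080) and rects[1] = (1920, 0, 3840, 1080).
 * total_pixels = 2 * 1920 * 1080 = 4147200, so pixel_mem = 4147200 * 4 =
 * 16588800 bytes (~15.8 MiB). The bounding box is 3840x1080, so bb_mem =
 * 3840 * 1080 * 4 = 16588800 bytes as well. Both values must fit within
 * dev_priv->max_primary_mem for the topology to be accepted.
 */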

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}
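
/*
 * Illustrative sketch (not driver code): callers propagate the -EDEADLK
 * pointer error so the atomic core can back off and rerun the check phase
 * with the same acquire context:
 *
 *	crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *	if (!crtc_state)
 *		continue;	// crtc not part of this update
 *
 * This is the pattern used by vmw_kms_check_implicit() and
 * vmw_kms_check_topology() below.
 */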

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed so we don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_cmd_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
			drm_property_create_range(&dev_priv->drm,
						  DRM_MODE_PROP_IMMUTABLE,
						  "hotplug_mode_update", 0, 1);
}

static void
vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct vmw_private *vmw = vmw_priv(old_state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	drm_atomic_helper_commit_tail(old_state);

	if (vmw->vkms_enabled) {
		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
			struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
			(void)old_crtc_state;
			flush_work(&du->vkms.crc_generator_work);
		}
	}
}

static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
	.atomic_commit_tail = vmw_atomic_commit_tail,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	static const char *display_unit_names[] = {
		"Invalid",
		"Legacy",
		"Screen Object",
		"Screen Target",
		"Invalid (max)"
	};

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;
	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
	dev->mode_config.helper_private = &vmw_mode_config_helpers;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}
	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
	drm_info(&dev_priv->drm, "%s display unit initialized\n",
		 display_unit_names[dev_priv->active_display_unit]);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * The docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(&dev_priv->drm);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				u64 pitch,
				u64 height)
{
	return (pitch * height) < (u64)dev_priv->vram_size;
}
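
/*
 * Worked example (illustrative): a 1920x1080 mode at an assumed 4 bytes per
 * pixel gives pitch = 1920 * 4 = 7680 bytes, so pitch * height =
 * 7680 * 1080 = 8294400 bytes (~7.9 MiB), which must be strictly smaller
 * than dev_priv->vram_size for the mode to validate.
 */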

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	drm_sysfs_hotplug_event(dev);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
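
/*
 * Worked example (illustrative): for a 1024x768 mode the guesses above give
 * htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918. The pixel clock
 * in kHz is then 1174 * 918 / 100 * 6 = 64662, which corresponds to a
 * refresh rate of 64662000 / (1174 * 918) ~= 60 Hz, matching the intent of
 * the "60Hz vrefresh" kdoc.
 */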


/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update preferred topology of display unit as per ioctl request. The topology
 * is expressed as an array of drm_vmw_rect.
 * e.g.
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. So a topology beyond these limits will return
 * with an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	const void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0,
					    VMWGFX_MIN_INITIAL_WIDTH,
					    VMWGFX_MIN_INITIAL_HEIGHT};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	} else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
		return -E2BIG;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space for overflow as the kernel uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check is limiting the topology within
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 *
 * Returns: 0 on success, or -ENOMEM if the FIFO reservation fails.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
						     dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
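
/*
 * Illustrative sketch (not driver code): a caller of vmw_kms_helper_dirty()
 * fills in struct vmw_kms_dirty with per-clip and per-unit callbacks plus a
 * FIFO reservation size, roughly:
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = my_emit_one_blit,	// hypothetical callback
 *		.fifo_commit = my_commit_fifo,	// hypothetical callback
 *		.fifo_reserve_size = sizeof(struct my_cmd) * num_clips,
 *	};
 *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 *
 * The helper then invokes .clip() once per clip rect that intersects each
 * affected display unit, and .fifo_commit() once per unit. The callback and
 * command-struct names above are made up for illustration; real users live
 * in the screen-object and screen-target code.
 */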
1606
1607 /**
1608 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
1609 * cleanup and fencing
1610 * @dev_priv: Pointer to the device-private struct
1611 * @file_priv: Pointer identifying the client when user-space fencing is used
1612 * @ctx: Pointer to the validation context
1613 * @out_fence: If non-NULL, returned refcounted fence-pointer
1614 * @user_fence_rep: If non-NULL, pointer to user-space address area
1615 * in which to copy user-space fence info
1616 */
vmw_kms_helper_validation_finish(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_validation_context * ctx,struct vmw_fence_obj ** out_fence,struct drm_vmw_fence_rep __user * user_fence_rep)1617 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
1618 struct drm_file *file_priv,
1619 struct vmw_validation_context *ctx,
1620 struct vmw_fence_obj **out_fence,
1621 struct drm_vmw_fence_rep __user *
1622 user_fence_rep)
1623 {
1624 struct vmw_fence_obj *fence = NULL;
1625 uint32_t handle = 0;
1626 int ret = 0;
1627
1628 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
1629 out_fence)
1630 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
1631 file_priv ? &handle : NULL);
1632 vmw_validation_done(ctx, fence);
1633 if (file_priv)
1634 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
1635 ret, user_fence_rep, fence,
1636 handle, -1);
1637 if (out_fence)
1638 *out_fence = fence;
1639 else
1640 vmw_fence_obj_unreference(&fence);
1641 }
1642
1643 /**
1644 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
1645 * property.
1646 *
1647 * @dev_priv: Pointer to a device private struct.
1648 *
1649 * Sets up the implicit placement property unless it's already set up.
1650 */
1651 void
vmw_kms_create_implicit_placement_property(struct vmw_private * dev_priv)1652 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
1653 {
1654 if (dev_priv->implicit_placement_property)
1655 return;
1656
1657 dev_priv->implicit_placement_property =
1658 drm_property_create_range(&dev_priv->drm,
1659 DRM_MODE_PROP_IMMUTABLE,
1660 "implicit_placement", 0, 1);
1661 }
1662
1663 /**
1664 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
1665 *
1666 * @dev: Pointer to the drm device
1667 * Return: 0 on success. Negative error code on failure.
1668 */
vmw_kms_suspend(struct drm_device * dev)1669 int vmw_kms_suspend(struct drm_device *dev)
1670 {
1671 struct vmw_private *dev_priv = vmw_priv(dev);
1672
1673 dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
1674 if (IS_ERR(dev_priv->suspend_state)) {
1675 int ret = PTR_ERR(dev_priv->suspend_state);
1676
1677 DRM_ERROR("Failed kms suspend: %d\n", ret);
1678 dev_priv->suspend_state = NULL;
1679
1680 return ret;
1681 }
1682
1683 return 0;
1684 }
1685
1686
1687 /**
1688 * vmw_kms_resume - Re-enable modesetting and restore state
1689 *
1690 * @dev: Pointer to the drm device
1691 * Return: 0 on success. Negative error code on failure.
1692 *
1693 * State is resumed from a previous vmw_kms_suspend(). It's illegal
1694 * to call this function without a previous vmw_kms_suspend().
1695 */
vmw_kms_resume(struct drm_device * dev)1696 int vmw_kms_resume(struct drm_device *dev)
1697 {
1698 struct vmw_private *dev_priv = vmw_priv(dev);
1699 int ret;
1700
1701 if (WARN_ON(!dev_priv->suspend_state))
1702 return 0;
1703
1704 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
1705 dev_priv->suspend_state = NULL;
1706
1707 return ret;
1708 }
1709
1710 /**
1711 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
1712 *
1713 * @dev: Pointer to the drm device
1714 */
vmw_kms_lost_device(struct drm_device * dev)1715 void vmw_kms_lost_device(struct drm_device *dev)
1716 {
1717 drm_atomic_helper_shutdown(dev);
1718 }
1719
1720 /**
1721 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
1722 * @update: The closure structure.
1723 *
1724 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
1725 * update on display unit.
1726 *
1727 * Return: 0 on success or a negative error code on failure.
1728 */
vmw_du_helper_plane_update(struct vmw_du_update_plane * update)1729 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
1730 {
1731 struct drm_plane_state *state = update->plane->state;
1732 struct drm_plane_state *old_state = update->old_state;
1733 struct drm_atomic_helper_damage_iter iter;
1734 struct drm_rect clip;
1735 struct drm_rect bb;
1736 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
1737 uint32_t reserved_size = 0;
1738 uint32_t submit_size = 0;
1739 uint32_t curr_size = 0;
1740 uint32_t num_hits = 0;
1741 void *cmd_start;
1742 char *cmd_next;
1743 int ret;
1744
1745 /*
1746 * Iterate in advance to check if really need plane update and find the
1747 * number of clips that actually are in plane src for fifo allocation.
1748 */
1749 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
1750 drm_atomic_for_each_plane_damage(&iter, &clip)
1751 num_hits++;
1752
1753 if (num_hits == 0)
1754 return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		/*
		 * For screen targets we want a mappable bo; for everything
		 * else we want an accelerated, i.e. host-backed (vram or
		 * gmr), bo. If the display unit is not a screen target then
		 * mobs shouldn't be available.
		 */
		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
			vmw_bo_placement_set(vfbbo->buffer,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
		} else {
			WARN_ON(update->dev_priv->has_mob);
			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
		}
		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);
		struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);

		ret = vmw_validation_add_resource(&val_ctx, &surf->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}
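
/*
 * Hedged usage sketch (illustrative only; the callback names below are
 * made up, and vmw_framebuffer_to_vfb is assumed from the driver
 * headers): a display unit fills in the &vmw_du_update_plane closure
 * and then invokes the helper, roughly:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.calc_fifo_size = my_calc_fifo_size,
 *		.clip = my_clip,
 *		.post_clip = my_post_clip,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */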

/**
 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
 *
 * @connector: the drm connector, part of a DU container
 * @mode: drm mode to check
 *
 * Returns MODE_OK on success, or a drm_mode_status error code.
 */
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
					      const struct drm_display_mode *mode)
{
	enum drm_mode_status ret;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	u32 assumed_cpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_cpp = 2;

	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
				     dev_priv->texture_max_height);
	if (ret != MODE_OK)
		return ret;

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_cpp,
					mode->vdisplay))
		return MODE_MEM;

	return MODE_OK;
}

/**
 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
 *
 * @connector: the drm connector, part of a DU container
 *
 * Returns the number of added modes.
 */
int vmw_connector_get_modes(struct drm_connector *connector)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	u32 max_width;
	u32 max_height;
	u32 num_modes;

	/* Add the preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;

	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	drm_mode_probed_add(connector, mode);
	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));

	/* Probe the connector for all modes not exceeding our geometry limits */
	max_width = dev_priv->texture_max_width;
	max_height = dev_priv->texture_max_height;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(dev_priv->stdu_max_width, max_width);
		max_height = min(dev_priv->stdu_max_height, max_height);
	}

	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);

	return num_modes;
}
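
/*
 * Hedged sketch (assumed wiring, not part of this file): display unit
 * implementations typically plug the two helpers above into their
 * connector helper funcs, e.g.:
 *
 *	static const struct drm_connector_helper_funcs du_connector_helper_funcs = {
 *		.get_modes = vmw_connector_get_modes,
 *		.mode_valid = vmw_connector_mode_valid,
 *	};
 */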

struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
{
	if (uo->buffer)
		vmw_user_bo_ref(uo->buffer);
	else if (uo->surface)
		vmw_surface_reference(uo->surface);
	return uo;
}

void vmw_user_object_unref(struct vmw_user_object *uo)
{
	if (uo->buffer)
		vmw_user_bo_unref(&uo->buffer);
	else if (uo->surface)
		vmw_surface_unreference(&uo->surface);
}

struct vmw_bo *
vmw_user_object_buffer(struct vmw_user_object *uo)
{
	if (uo->buffer)
		return uo->buffer;
	else if (uo->surface)
		return uo->surface->res.guest_memory_bo;
	return NULL;
}

struct vmw_surface *
vmw_user_object_surface(struct vmw_user_object *uo)
{
	if (uo->buffer)
		return uo->buffer->dumb_surface;
	return uo->surface;
}

void *vmw_user_object_map(struct vmw_user_object *uo)
{
	struct vmw_bo *bo = vmw_user_object_buffer(uo);

	WARN_ON(!bo);
	return vmw_bo_map_and_cache(bo);
}

void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
{
	struct vmw_bo *bo = vmw_user_object_buffer(uo);

	WARN_ON(!bo);
	return vmw_bo_map_and_cache_size(bo, size);
}

void vmw_user_object_unmap(struct vmw_user_object *uo)
{
	struct vmw_bo *bo = vmw_user_object_buffer(uo);
	int ret;

	WARN_ON(!bo);

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
	if (ret != 0)
		return;

	vmw_bo_unmap(bo);
	vmw_bo_pin_reserved(bo, false);

	ttm_bo_unreserve(&bo->tbo);
}

bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
{
	struct vmw_bo *bo;

	if (!uo || vmw_user_object_is_null(uo))
		return false;

	bo = vmw_user_object_buffer(uo);

	if (WARN_ON(!bo))
		return false;

	WARN_ON(bo->map.bo && !bo->map.virtual);
	return bo->map.virtual;
}

bool vmw_user_object_is_null(struct vmw_user_object *uo)
{
	return !uo->buffer && !uo->surface;
}
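
/*
 * Hedged usage sketch (error handling elided, caller context assumed):
 * the vmw_user_object accessors above are meant to be used in pairs,
 * roughly:
 *
 *	if (!vmw_user_object_is_null(uo)) {
 *		void *virtual;
 *
 *		vmw_user_object_ref(uo);
 *		virtual = vmw_user_object_map(uo);
 *		if (virtual) {
 *			... read or write the mapping ...
 *			vmw_user_object_unmap(uo);
 *		}
 *		vmw_user_object_unref(uo);
 *	}
 */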