// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022-2023 Intel Corporation
 *
 * High level display driver entry points. This is a layer between top level
 * driver code and low level display functionality; no low level display code or
 * details here.
 */

#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bios.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"

bool intel_display_driver_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (display->platform.i845g || display->platform.i865g) {
		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (display->platform.i830 || display->platform.i85x ||
		   display->platform.i915g || display->platform.i915gm) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}

static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
	struct intel_plane *plane;

	for_each_intel_plane(display->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}

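/*
 * Display probe is split into three parts, called from the top level driver
 * code at different points of driver load (a sketch of the ordering, derived
 * from the "part #N" comments below; the actual call sites are outside this
 * file):
 *
 *	intel_display_driver_probe_noirq()  - before IRQ install
 *	intel_display_driver_probe_nogem()  - after IRQ install, before GEM init
 *	intel_display_driver_probe()        - after GEM init
 */
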
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	if (!HAS_DISPLAY(display))
		return 0;

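	/*
	 * Allocate the display work queues: an ordered queue for DP hotplug
	 * work, an ordered modeset queue, a high priority unbound queue for
	 * page flips, a high priority cleanup queue and a general unordered
	 * queue. All of them are destroyed in
	 * intel_display_driver_remove_noirq().
	 */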
	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					   WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	intel_dmc_init(display);

	intel_mode_config_init(display);

	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}

static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	drm_WARN_ON(display->drm, err);
}

/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples of such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	intel_hpd_enable_detection_work(display);
}

/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples of such accesses
 * are modeset commits and connector probing. Access remains enabled for the
 * current thread, which should only perform HW init/deinit programming (such
 * as the initial modeset during driver loading or the disabling modeset
 * during driver unloading and system suspend/shutdown). This function should
 * be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}
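
/*
 * Illustrative sequencing for the driver load path (a sketch derived from the
 * kernel-doc above; in this file the calls are made from
 * intel_display_driver_probe_nogem() and intel_display_driver_register()):
 *
 *	intel_display_driver_disable_user_access(display);
 *	... HW init programming, e.g. the initial modeset ...
 *	intel_display_driver_enable_user_access(display);
 */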

/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples of such accesses
 * are modeset commits and connector probing. This call should either be
 * followed by a call to intel_display_driver_resume_access(), or the driver
 * should be unloaded/shut down.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	set_display_access(display, false, NULL);
}

/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples of such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (such as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	set_display_access(display, false, current);
}
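
/*
 * Illustrative sequencing for suspend/unload and resume (a sketch derived
 * from the kernel-doc of the access helpers above):
 *
 *	intel_display_driver_disable_user_access(display);
 *	... HW deinit programming, e.g. the disabling modeset ...
 *	intel_display_driver_suspend_access(display);
 *
 * and on system resume:
 *
 *	intel_display_driver_resume_access(display);
 *	... HW init programming, e.g. the restoring modeset ...
 *	intel_display_driver_enable_user_access(display);
 */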

/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access and print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}
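
/*
 * Illustrative caller pattern (a sketch, not a call site in this file;
 * callers elsewhere guard their HW access with this check, e.g. during
 * connector probing):
 *
 *	if (!intel_display_driver_check_access(display))
 *		return;	... skip any HW access ...
 */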

/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_update_czclk(display);
	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}

/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states, so that on
	 * mode_setcrtc after probe all the intel_plane_state variables are
	 * already calculated and there are no assert_plane warnings during
	 * bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}

void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}

/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}

/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}

/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}

void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), flush the
	 * hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}

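/*
 * Re-read the current HW state and, if a state was duplicated at suspend time
 * (display->restore.modeset_state), commit it to restore the pre-suspend
 * display configuration.
 */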
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state; pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}

void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}