1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28 #include <linux/dma-fence.h>
29 #include <linux/ktime.h>
30
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_atomic_uapi.h>
34 #include <drm/drm_blend.h>
35 #include <drm/drm_bridge.h>
36 #include <drm/drm_damage_helper.h>
37 #include <drm/drm_device.h>
38 #include <drm/drm_drv.h>
39 #include <drm/drm_framebuffer.h>
40 #include <drm/drm_gem_atomic_helper.h>
41 #include <drm/drm_panic.h>
42 #include <drm/drm_print.h>
43 #include <drm/drm_self_refresh_helper.h>
44 #include <drm/drm_vblank.h>
45 #include <drm/drm_writeback.h>
46
47 #include "drm_crtc_helper_internal.h"
48 #include "drm_crtc_internal.h"
49
50 /**
51 * DOC: overview
52 *
53 * This helper library provides implementations of check and commit functions on
54 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
55 * also provides convenience implementations for the atomic state handling
56 * callbacks for drivers which don't need to subclass the drm core structures to
57 * add their own additional internal state.
58 *
59 * This library also provides default implementations for the check callback in
60 * drm_atomic_helper_check() and for the commit callback with
61 * drm_atomic_helper_commit(). But the individual stages and callbacks are
62 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
63 * together with a driver private modeset implementation.
64 *
65 * This library also provides implementations for all the legacy driver
66 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
67 * drm_atomic_helper_disable_plane(), and the various functions to implement
68 * set_property callbacks. New drivers must not implement these functions
69 * themselves but must use the provided helpers.
70 *
71 * The atomic helper uses the same function table structures as all other
72 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
73 * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
74 * also shares the &struct drm_plane_helper_funcs function table with the plane
75 * helpers.
76 */
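/*
 * A minimal sketch (not from the original file) of how a driver might wire
 * these helpers into its &drm_mode_config_funcs; "foo" is a hypothetical
 * GEM-based driver for which drm_gem_fb_create() is assumed to be suitable:
 *
 *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *             .fb_create = drm_gem_fb_create,
 *             .atomic_check = drm_atomic_helper_check,
 *             .atomic_commit = drm_atomic_helper_commit,
 *     };
 */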
77 static void
78 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
79 struct drm_plane_state *old_plane_state,
80 struct drm_plane_state *plane_state,
81 struct drm_plane *plane)
82 {
83 struct drm_crtc_state *crtc_state;
84
85 if (old_plane_state->crtc) {
86 crtc_state = drm_atomic_get_new_crtc_state(state,
87 old_plane_state->crtc);
88
89 if (WARN_ON(!crtc_state))
90 return;
91
92 crtc_state->planes_changed = true;
93 }
94
95 if (plane_state->crtc) {
96 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
97
98 if (WARN_ON(!crtc_state))
99 return;
100
101 crtc_state->planes_changed = true;
102 }
103 }
104
105 static int handle_conflicting_encoders(struct drm_atomic_state *state,
106 bool disable_conflicting_encoders)
107 {
108 struct drm_connector_state *new_conn_state;
109 struct drm_connector *connector;
110 struct drm_connector_list_iter conn_iter;
111 struct drm_encoder *encoder;
112 unsigned int encoder_mask = 0;
113 int i, ret = 0;
114
115 /*
116 * First loop, find all newly assigned encoders from the connectors
117 * part of the state. If the same encoder is assigned to multiple
118 * connectors bail out.
119 */
120 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
121 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
122 struct drm_encoder *new_encoder;
123
124 if (!new_conn_state->crtc)
125 continue;
126
127 if (funcs->atomic_best_encoder)
128 new_encoder = funcs->atomic_best_encoder(connector,
129 state);
130 else if (funcs->best_encoder)
131 new_encoder = funcs->best_encoder(connector);
132 else
133 new_encoder = drm_connector_get_single_encoder(connector);
134
135 if (new_encoder) {
136 if (encoder_mask & drm_encoder_mask(new_encoder)) {
137 drm_dbg_atomic(connector->dev,
138 "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
139 new_encoder->base.id, new_encoder->name,
140 connector->base.id, connector->name);
141
142 return -EINVAL;
143 }
144
145 encoder_mask |= drm_encoder_mask(new_encoder);
146 }
147 }
148
149 if (!encoder_mask)
150 return 0;
151
152 /*
153 * Second loop, iterate over all connectors not part of the state.
154 *
155 * If a conflicting encoder is found and disable_conflicting_encoders
156 * is not set, an error is returned. Userspace can provide a solution
157 * through the atomic ioctl.
158 *
159 * If the flag is set conflicting connectors are removed from the CRTC
160 * and the CRTC is disabled if no encoder is left. This preserves
161 * compatibility with the legacy set_config behavior.
162 */
163 drm_connector_list_iter_begin(state->dev, &conn_iter);
164 drm_for_each_connector_iter(connector, &conn_iter) {
165 struct drm_crtc_state *crtc_state;
166
167 if (drm_atomic_get_new_connector_state(state, connector))
168 continue;
169
170 encoder = connector->state->best_encoder;
171 if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
172 continue;
173
174 if (!disable_conflicting_encoders) {
175 drm_dbg_atomic(connector->dev,
176 "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
177 encoder->base.id, encoder->name,
178 connector->state->crtc->base.id,
179 connector->state->crtc->name,
180 connector->base.id, connector->name);
181 ret = -EINVAL;
182 goto out;
183 }
184
185 new_conn_state = drm_atomic_get_connector_state(state, connector);
186 if (IS_ERR(new_conn_state)) {
187 ret = PTR_ERR(new_conn_state);
188 goto out;
189 }
190
191 drm_dbg_atomic(connector->dev,
192 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
193 encoder->base.id, encoder->name,
194 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
195 connector->base.id, connector->name);
196
197 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
198
199 ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
200 if (ret)
201 goto out;
202
203 if (!crtc_state->connector_mask) {
204 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
205 NULL);
206 if (ret < 0)
207 goto out;
208
209 crtc_state->active = false;
210 }
211 }
212 out:
213 drm_connector_list_iter_end(&conn_iter);
214
215 return ret;
216 }
217
218 static void
219 set_best_encoder(struct drm_atomic_state *state,
220 struct drm_connector_state *conn_state,
221 struct drm_encoder *encoder)
222 {
223 struct drm_crtc_state *crtc_state;
224 struct drm_crtc *crtc;
225
226 if (conn_state->best_encoder) {
227 /* Unset the encoder_mask in the old crtc state. */
228 crtc = conn_state->connector->state->crtc;
229
230 /* A NULL crtc is an error here because we should have
231 * duplicated a NULL best_encoder when crtc was NULL.
232 * As an exception, restoring duplicated atomic state
233 * during resume is allowed, so don't warn when
234 * best_encoder is equal to the encoder we intend to set.
235 */
236 WARN_ON(!crtc && encoder != conn_state->best_encoder);
237 if (crtc) {
238 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
239
240 crtc_state->encoder_mask &=
241 ~drm_encoder_mask(conn_state->best_encoder);
242 }
243 }
244
245 if (encoder) {
246 crtc = conn_state->crtc;
247 WARN_ON(!crtc);
248 if (crtc) {
249 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
250
251 crtc_state->encoder_mask |=
252 drm_encoder_mask(encoder);
253 }
254 }
255
256 conn_state->best_encoder = encoder;
257 }
258
259 static void
260 steal_encoder(struct drm_atomic_state *state,
261 struct drm_encoder *encoder)
262 {
263 struct drm_crtc_state *crtc_state;
264 struct drm_connector *connector;
265 struct drm_connector_state *old_connector_state, *new_connector_state;
266 int i;
267
268 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
269 struct drm_crtc *encoder_crtc;
270
271 if (new_connector_state->best_encoder != encoder)
272 continue;
273
274 encoder_crtc = old_connector_state->crtc;
275
276 drm_dbg_atomic(encoder->dev,
277 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
278 encoder->base.id, encoder->name,
279 encoder_crtc->base.id, encoder_crtc->name);
280
281 set_best_encoder(state, new_connector_state, NULL);
282
283 crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
284 crtc_state->connectors_changed = true;
285
286 return;
287 }
288 }
289
290 static int
291 update_connector_routing(struct drm_atomic_state *state,
292 struct drm_connector *connector,
293 struct drm_connector_state *old_connector_state,
294 struct drm_connector_state *new_connector_state,
295 bool added_by_user)
296 {
297 const struct drm_connector_helper_funcs *funcs;
298 struct drm_encoder *new_encoder;
299 struct drm_crtc_state *crtc_state;
300
301 drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
302 connector->base.id, connector->name);
303
304 if (old_connector_state->crtc != new_connector_state->crtc) {
305 if (old_connector_state->crtc) {
306 crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
307 crtc_state->connectors_changed = true;
308 }
309
310 if (new_connector_state->crtc) {
311 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
312 crtc_state->connectors_changed = true;
313 }
314 }
315
316 if (!new_connector_state->crtc) {
317 drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
318 connector->base.id, connector->name);
319
320 set_best_encoder(state, new_connector_state, NULL);
321
322 return 0;
323 }
324
325 crtc_state = drm_atomic_get_new_crtc_state(state,
326 new_connector_state->crtc);
327 /*
328 * For compatibility with legacy users, we want to make sure that
329 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
330 * which would result in anything else must be considered invalid, to
331 * avoid turning on new displays on dead connectors.
332 *
333 * Since the connector can be unregistered at any point during an
334 * atomic check or commit, this is racy. But that's OK: all we care
335 * about is ensuring that userspace can't do anything but shut off the
336 * display on a connector that was destroyed after it's been notified,
337 * not before.
338 *
339 * Additionally, we also want to ignore connector registration when
340 * we're trying to restore an atomic state during system resume since
341 * there's a chance the connector may have been destroyed during the
342 * process, but it's better to ignore that than to cause
343 * drm_atomic_helper_resume() to fail.
344 *
345 * Last, we want to ignore connector registration when the connector
346 * was not pulled in the atomic state by user-space (ie, was pulled
347 * in by the driver, e.g. when updating a DP-MST stream).
348 */
349 if (!state->duplicated && drm_connector_is_unregistered(connector) &&
350 added_by_user && crtc_state->active) {
351 drm_dbg_atomic(connector->dev,
352 "[CONNECTOR:%d:%s] is not registered\n",
353 connector->base.id, connector->name);
354 return -EINVAL;
355 }
356
357 funcs = connector->helper_private;
358
359 if (funcs->atomic_best_encoder)
360 new_encoder = funcs->atomic_best_encoder(connector, state);
361 else if (funcs->best_encoder)
362 new_encoder = funcs->best_encoder(connector);
363 else
364 new_encoder = drm_connector_get_single_encoder(connector);
365
366 if (!new_encoder) {
367 drm_dbg_atomic(connector->dev,
368 "No suitable encoder found for [CONNECTOR:%d:%s]\n",
369 connector->base.id, connector->name);
370 return -EINVAL;
371 }
372
373 if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
374 drm_dbg_atomic(connector->dev,
375 "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
376 new_encoder->base.id,
377 new_encoder->name,
378 new_connector_state->crtc->base.id,
379 new_connector_state->crtc->name);
380 return -EINVAL;
381 }
382
383 if (new_encoder == new_connector_state->best_encoder) {
384 set_best_encoder(state, new_connector_state, new_encoder);
385
386 drm_dbg_atomic(connector->dev,
387 "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
388 connector->base.id,
389 connector->name,
390 new_encoder->base.id,
391 new_encoder->name,
392 new_connector_state->crtc->base.id,
393 new_connector_state->crtc->name);
394
395 return 0;
396 }
397
398 steal_encoder(state, new_encoder);
399
400 set_best_encoder(state, new_connector_state, new_encoder);
401
402 crtc_state->connectors_changed = true;
403
404 drm_dbg_atomic(connector->dev,
405 "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
406 connector->base.id,
407 connector->name,
408 new_encoder->base.id,
409 new_encoder->name,
410 new_connector_state->crtc->base.id,
411 new_connector_state->crtc->name);
412
413 return 0;
414 }
415
416 static int
417 mode_fixup(struct drm_atomic_state *state)
418 {
419 struct drm_crtc *crtc;
420 struct drm_crtc_state *new_crtc_state;
421 struct drm_connector *connector;
422 struct drm_connector_state *new_conn_state;
423 int i;
424 int ret;
425
426 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
427 if (!new_crtc_state->mode_changed &&
428 !new_crtc_state->connectors_changed)
429 continue;
430
431 drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
432 }
433
434 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
435 const struct drm_encoder_helper_funcs *funcs;
436 struct drm_encoder *encoder;
437 struct drm_bridge *bridge;
438
439 WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
440
441 if (!new_conn_state->crtc || !new_conn_state->best_encoder)
442 continue;
443
444 new_crtc_state =
445 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
446
447 /*
448 * Each encoder has at most one connector (since we always steal
449 * it away), so we won't call ->mode_fixup twice.
450 */
451 encoder = new_conn_state->best_encoder;
452 funcs = encoder->helper_private;
453
454 bridge = drm_bridge_chain_get_first_bridge(encoder);
455 ret = drm_atomic_bridge_chain_check(bridge,
456 new_crtc_state,
457 new_conn_state);
458 if (ret) {
459 drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
460 return ret;
461 }
462
463 if (funcs && funcs->atomic_check) {
464 ret = funcs->atomic_check(encoder, new_crtc_state,
465 new_conn_state);
466 if (ret) {
467 drm_dbg_atomic(encoder->dev,
468 "[ENCODER:%d:%s] check failed\n",
469 encoder->base.id, encoder->name);
470 return ret;
471 }
472 } else if (funcs && funcs->mode_fixup) {
473 ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
474 &new_crtc_state->adjusted_mode);
475 if (!ret) {
476 drm_dbg_atomic(encoder->dev,
477 "[ENCODER:%d:%s] fixup failed\n",
478 encoder->base.id, encoder->name);
479 return -EINVAL;
480 }
481 }
482 }
483
484 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
485 const struct drm_crtc_helper_funcs *funcs;
486
487 if (!new_crtc_state->enable)
488 continue;
489
490 if (!new_crtc_state->mode_changed &&
491 !new_crtc_state->connectors_changed)
492 continue;
493
494 funcs = crtc->helper_private;
495 if (!funcs || !funcs->mode_fixup)
496 continue;
497
498 ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
499 &new_crtc_state->adjusted_mode);
500 if (!ret) {
501 drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
502 crtc->base.id, crtc->name);
503 return -EINVAL;
504 }
505 }
506
507 return 0;
508 }
509
510 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
511 struct drm_encoder *encoder,
512 struct drm_crtc *crtc,
513 const struct drm_display_mode *mode)
514 {
515 struct drm_bridge *bridge;
516 enum drm_mode_status ret;
517
518 ret = drm_encoder_mode_valid(encoder, mode);
519 if (ret != MODE_OK) {
520 drm_dbg_atomic(encoder->dev,
521 "[ENCODER:%d:%s] mode_valid() failed\n",
522 encoder->base.id, encoder->name);
523 return ret;
524 }
525
526 bridge = drm_bridge_chain_get_first_bridge(encoder);
527 ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
528 mode);
529 if (ret != MODE_OK) {
530 drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
531 return ret;
532 }
533
534 ret = drm_crtc_mode_valid(crtc, mode);
535 if (ret != MODE_OK) {
536 drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
537 crtc->base.id, crtc->name);
538 return ret;
539 }
540
541 return ret;
542 }
543
544 static int
545 mode_valid(struct drm_atomic_state *state)
546 {
547 struct drm_connector_state *conn_state;
548 struct drm_connector *connector;
549 int i;
550
551 for_each_new_connector_in_state(state, connector, conn_state, i) {
552 struct drm_encoder *encoder = conn_state->best_encoder;
553 struct drm_crtc *crtc = conn_state->crtc;
554 struct drm_crtc_state *crtc_state;
555 enum drm_mode_status mode_status;
556 const struct drm_display_mode *mode;
557
558 if (!crtc || !encoder)
559 continue;
560
561 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
562 if (!crtc_state)
563 continue;
564 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
565 continue;
566
567 mode = &crtc_state->mode;
568
569 mode_status = mode_valid_path(connector, encoder, crtc, mode);
570 if (mode_status != MODE_OK)
571 return -EINVAL;
572 }
573
574 return 0;
575 }
576
577 static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
578 struct drm_crtc *crtc)
579 {
580 struct drm_encoder *drm_enc;
581 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
582 crtc);
583
584 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
585 if (!drm_enc->possible_clones) {
586 DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
587 continue;
588 }
589
590 if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
591 crtc_state->encoder_mask) {
592 DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
593 crtc->base.id, crtc_state->encoder_mask);
594 return -EINVAL;
595 }
596 }
597
598 return 0;
599 }
600
601 /**
602 * drm_atomic_helper_check_modeset - validate state object for modeset changes
603 * @dev: DRM device
604 * @state: the driver state object
605 *
606 * Check the state object to see if the requested state is physically possible.
607 * This does all the CRTC and connector related computations for an atomic
608 * update and adds any additional connectors needed for full modesets. It calls
609 * the various per-object callbacks in the following order:
610 *
611 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
612 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
613 * 3. If it's determined a modeset is needed then all connectors on the affected
614 * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
615 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
616 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
617 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
618 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
619 * This function is only called when the encoder will be part of a configured CRTC;
620 * it must not be used for implementing connector property validation.
621 * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
622 * instead.
623 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
624 *
625 * &drm_crtc_state.mode_changed is set when the input mode is changed.
626 * &drm_crtc_state.connectors_changed is set when a connector is added or
627 * removed from the CRTC. &drm_crtc_state.active_changed is set when
628 * &drm_crtc_state.active changes, which is used for DPMS.
629 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
630 * See also: drm_atomic_crtc_needs_modeset()
631 *
632 * IMPORTANT:
633 *
634 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
635 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
636 * without a full modeset) _must_ call this function after that change. It is
637 * permitted to call this function multiple times for the same update, e.g.
638 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
639 * adjusted dotclock for fifo space allocation and watermark computation.
640 *
641 * RETURNS:
642 * Zero for success or -errno
643 */
644 int
645 drm_atomic_helper_check_modeset(struct drm_device *dev,
646 struct drm_atomic_state *state)
647 {
648 struct drm_crtc *crtc;
649 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
650 struct drm_connector *connector;
651 struct drm_connector_state *old_connector_state, *new_connector_state;
652 int i, ret;
653 unsigned int connectors_mask = 0, user_connectors_mask = 0;
654
655 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
656 user_connectors_mask |= BIT(i);
657
658 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
659 bool has_connectors =
660 !!new_crtc_state->connector_mask;
661
662 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
663
664 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
665 drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
666 crtc->base.id, crtc->name);
667 new_crtc_state->mode_changed = true;
668 }
669
670 if (old_crtc_state->enable != new_crtc_state->enable) {
671 drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
672 crtc->base.id, crtc->name);
673
674 /*
675 * For clarity this assignment is done here, but
676 * enable == 0 is only true when there are no
677 * connectors and a NULL mode.
678 *
679 * The other way around is true as well. enable != 0
680 * implies that connectors are attached and a mode is set.
681 */
682 new_crtc_state->mode_changed = true;
683 new_crtc_state->connectors_changed = true;
684 }
685
686 if (old_crtc_state->active != new_crtc_state->active) {
687 drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
688 crtc->base.id, crtc->name);
689 new_crtc_state->active_changed = true;
690 }
691
692 if (new_crtc_state->enable != has_connectors) {
693 drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
694 crtc->base.id, crtc->name,
695 new_crtc_state->enable, has_connectors);
696
697 return -EINVAL;
698 }
699
700 if (drm_dev_has_vblank(dev))
701 new_crtc_state->no_vblank = false;
702 else
703 new_crtc_state->no_vblank = true;
704 }
705
706 ret = handle_conflicting_encoders(state, false);
707 if (ret)
708 return ret;
709
710 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
711 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
712
713 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
714
715 /*
716 * This only sets crtc->connectors_changed for routing changes;
717 * drivers must set crtc->connectors_changed themselves when
718 * connector properties need to be updated.
719 */
720 ret = update_connector_routing(state, connector,
721 old_connector_state,
722 new_connector_state,
723 BIT(i) & user_connectors_mask);
724 if (ret)
725 return ret;
726 if (old_connector_state->crtc) {
727 new_crtc_state = drm_atomic_get_new_crtc_state(state,
728 old_connector_state->crtc);
729 if (old_connector_state->link_status !=
730 new_connector_state->link_status)
731 new_crtc_state->connectors_changed = true;
732
733 if (old_connector_state->max_requested_bpc !=
734 new_connector_state->max_requested_bpc)
735 new_crtc_state->connectors_changed = true;
736 }
737
738 if (funcs->atomic_check)
739 ret = funcs->atomic_check(connector, state);
740 if (ret) {
741 drm_dbg_atomic(dev,
742 "[CONNECTOR:%d:%s] driver check failed\n",
743 connector->base.id, connector->name);
744 return ret;
745 }
746
747 connectors_mask |= BIT(i);
748 }
749
750 /*
751 * After all the routing has been prepared we need to add in any
752 * connector which is itself unchanged, but whose CRTC changes its
753 * configuration. This must be done before calling mode_fixup in case a
754 * crtc only changed its mode but has the same set of connectors.
755 */
756 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
757 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
758 continue;
759
760 drm_dbg_atomic(dev,
761 "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
762 crtc->base.id, crtc->name,
763 new_crtc_state->enable ? 'y' : 'n',
764 new_crtc_state->active ? 'y' : 'n');
765
766 ret = drm_atomic_add_affected_connectors(state, crtc);
767 if (ret != 0)
768 return ret;
769
770 ret = drm_atomic_add_affected_planes(state, crtc);
771 if (ret != 0)
772 return ret;
773
774 ret = drm_atomic_check_valid_clones(state, crtc);
775 if (ret != 0)
776 return ret;
777 }
778
779 /*
780 * Iterate over all connectors again, to make sure atomic_check()
781 * has been called on them when a modeset is forced.
782 */
783 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
784 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
785
786 if (connectors_mask & BIT(i))
787 continue;
788
789 if (funcs->atomic_check)
790 ret = funcs->atomic_check(connector, state);
791 if (ret) {
792 drm_dbg_atomic(dev,
793 "[CONNECTOR:%d:%s] driver check failed\n",
794 connector->base.id, connector->name);
795 return ret;
796 }
797 }
798
799 /*
800 * Iterate over all connectors again, and add all affected bridges to
801 * the state.
802 */
803 for_each_oldnew_connector_in_state(state, connector,
804 old_connector_state,
805 new_connector_state, i) {
806 struct drm_encoder *encoder;
807
808 encoder = old_connector_state->best_encoder;
809 ret = drm_atomic_add_encoder_bridges(state, encoder);
810 if (ret)
811 return ret;
812
813 encoder = new_connector_state->best_encoder;
814 ret = drm_atomic_add_encoder_bridges(state, encoder);
815 if (ret)
816 return ret;
817 }
818
819 ret = mode_valid(state);
820 if (ret)
821 return ret;
822
823 return mode_fixup(state);
824 }
825 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
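/*
 * A hedged sketch of the re-check pattern described in the IMPORTANT note
 * above: foo_check_global_limits() is a hypothetical driver hook that may set
 * &drm_crtc_state.mode_changed, which requires running the modeset check
 * again before the plane checks.
 *
 *     static int foo_atomic_check(struct drm_device *dev,
 *                                 struct drm_atomic_state *state)
 *     {
 *             int ret;
 *
 *             ret = drm_atomic_helper_check_modeset(dev, state);
 *             if (ret)
 *                     return ret;
 *
 *             ret = foo_check_global_limits(dev, state);
 *             if (ret)
 *                     return ret;
 *
 *             ret = drm_atomic_helper_check_modeset(dev, state);
 *             if (ret)
 *                     return ret;
 *
 *             return drm_atomic_helper_check_planes(dev, state);
 *     }
 */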
826
827 /**
828 * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
829 * @connector: corresponding connector
830 * @state: the driver state object
831 *
832 * Checks if the writeback connector state is valid, and returns an error if it
833 * isn't.
834 *
835 * RETURNS:
836 * Zero for success or -errno
837 */
838 int
839 drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
840 struct drm_atomic_state *state)
841 {
842 struct drm_connector_state *conn_state =
843 drm_atomic_get_new_connector_state(state, connector);
844 struct drm_writeback_job *wb_job = conn_state->writeback_job;
845 struct drm_property_blob *pixel_format_blob;
846 struct drm_framebuffer *fb;
847 size_t i, nformats;
848 u32 *formats;
849
850 if (!wb_job || !wb_job->fb)
851 return 0;
852
853 pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
854 nformats = pixel_format_blob->length / sizeof(u32);
855 formats = pixel_format_blob->data;
856 fb = wb_job->fb;
857
858 for (i = 0; i < nformats; i++)
859 if (fb->format->format == formats[i])
860 return 0;
861
862 drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
863
864 return -EINVAL;
865 }
866 EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
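/*
 * A minimal sketch, assuming a hypothetical writeback connector of a driver
 * "foo": the helper can back &drm_connector_helper_funcs.atomic_check, with
 * any driver-specific checks added around it.
 *
 *     static int foo_wb_connector_atomic_check(struct drm_connector *connector,
 *                                              struct drm_atomic_state *state)
 *     {
 *             return drm_atomic_helper_check_wb_connector_state(connector, state);
 *     }
 */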
867
868 /**
869 * drm_atomic_helper_check_plane_state() - Check plane state for validity
870 * @plane_state: plane state to check
871 * @crtc_state: CRTC state to check
872 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
873 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
874 * @can_position: is it legal to position the plane such that it
875 * doesn't cover the entire CRTC? This will generally
876 * only be false for primary planes.
877 * @can_update_disabled: can the plane be updated while the CRTC
878 * is disabled?
879 *
880 * Checks that a desired plane update is valid, and updates various
881 * bits of derived state (clipped coordinates etc.). Drivers that provide
882 * their own plane handling rather than helper-provided implementations may
883 * still wish to call this function to avoid duplication of error checking
884 * code.
885 *
886 * RETURNS:
887 * Zero if update appears valid, error code on failure
888 */
889 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
890 const struct drm_crtc_state *crtc_state,
891 int min_scale,
892 int max_scale,
893 bool can_position,
894 bool can_update_disabled)
895 {
896 struct drm_framebuffer *fb = plane_state->fb;
897 struct drm_rect *src = &plane_state->src;
898 struct drm_rect *dst = &plane_state->dst;
899 unsigned int rotation = plane_state->rotation;
900 struct drm_rect clip = {};
901 int hscale, vscale;
902
903 WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
904
905 *src = drm_plane_state_src(plane_state);
906 *dst = drm_plane_state_dest(plane_state);
907
908 if (!fb) {
909 plane_state->visible = false;
910 return 0;
911 }
912
913 /* crtc should only be NULL when disabling (i.e., !fb) */
914 if (WARN_ON(!plane_state->crtc)) {
915 plane_state->visible = false;
916 return 0;
917 }
918
919 if (!crtc_state->enable && !can_update_disabled) {
920 drm_dbg_kms(plane_state->plane->dev,
921 "Cannot update plane of a disabled CRTC.\n");
922 return -EINVAL;
923 }
924
925 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
926
927 /* Check scaling */
928 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
929 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
930 if (hscale < 0 || vscale < 0) {
931 drm_dbg_kms(plane_state->plane->dev,
932 "Invalid scaling of plane\n");
933 drm_rect_debug_print("src: ", &plane_state->src, true);
934 drm_rect_debug_print("dst: ", &plane_state->dst, false);
935 return -ERANGE;
936 }
937
938 if (crtc_state->enable)
939 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
940
941 plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
942
943 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
944
945 if (!plane_state->visible)
946 /*
947 * Plane isn't visible; some drivers can handle this
948 * so we just return success here. Drivers that can't
949 * (including those that use the primary plane helper's
950 * update function) will return an error from their
951 * update_plane handler.
952 */
953 return 0;
954
955 if (!can_position && !drm_rect_equals(dst, &clip)) {
956 drm_dbg_kms(plane_state->plane->dev,
957 "Plane must cover entire CRTC\n");
958 drm_rect_debug_print("dst: ", dst, false);
959 drm_rect_debug_print("clip: ", &clip, false);
960 return -EINVAL;
961 }
962
963 return 0;
964 }
965 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
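/*
 * A hedged sketch of a &drm_plane_helper_funcs.atomic_check hook for a
 * hypothetical non-scaling overlay plane of a driver "foo";
 * DRM_PLANE_NO_SCALING is the 16.16 fixed-point 1:1 scale factor:
 *
 *     static int foo_plane_atomic_check(struct drm_plane *plane,
 *                                       struct drm_atomic_state *state)
 *     {
 *             struct drm_plane_state *new_state =
 *                     drm_atomic_get_new_plane_state(state, plane);
 *             struct drm_crtc_state *crtc_state;
 *
 *             if (!new_state->crtc)
 *                     return 0;
 *
 *             crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 *
 *             return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *                                                        DRM_PLANE_NO_SCALING,
 *                                                        DRM_PLANE_NO_SCALING,
 *                                                        true, false);
 *     }
 */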
966
967 /**
968 * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
969 * @crtc_state: CRTC state to check
970 *
971 * Checks that a CRTC has at least one primary plane attached to it, which is
972 * a requirement on some hardware. Note that this only involves the CRTC side
973 * of the test. To test if the primary plane is visible or if it can be updated
974 * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
975 * the plane's atomic check.
976 *
977 * RETURNS:
978 * 0 if a primary plane is attached to the CRTC, or an error code otherwise
979 */
980 int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
981 {
982 struct drm_crtc *crtc = crtc_state->crtc;
983 struct drm_device *dev = crtc->dev;
984 struct drm_plane *plane;
985
986 /* needs at least one primary plane to be enabled */
987 drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
988 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
989 return 0;
990 }
991
992 drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);
993
994 return -EINVAL;
995 }
996 EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
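/*
 * A minimal sketch of using this from the &drm_crtc_helper_funcs.atomic_check
 * hook of a hypothetical driver "foo" whose hardware requires an enabled
 * primary plane:
 *
 *     static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *                                      struct drm_atomic_state *state)
 *     {
 *             struct drm_crtc_state *crtc_state =
 *                     drm_atomic_get_new_crtc_state(state, crtc);
 *
 *             if (!crtc_state->enable)
 *                     return 0;
 *
 *             return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
 *     }
 */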
997
998 /**
999 * drm_atomic_helper_check_planes - validate state object for planes changes
1000 * @dev: DRM device
1001 * @state: the driver state object
1002 *
1003 * Check the state object to see if the requested state is physically possible.
1004 * This does all the plane update related checks by calling into the
1005 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
1006 * hooks provided by the driver.
1007 *
1008 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
1009 * updated planes.
1010 *
1011 * RETURNS:
1012 * Zero for success or -errno
1013 */
1014 int
1015 drm_atomic_helper_check_planes(struct drm_device *dev,
1016 struct drm_atomic_state *state)
1017 {
1018 struct drm_crtc *crtc;
1019 struct drm_crtc_state *new_crtc_state;
1020 struct drm_plane *plane;
1021 struct drm_plane_state *new_plane_state, *old_plane_state;
1022 int i, ret = 0;
1023
1024 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
1025 const struct drm_plane_helper_funcs *funcs;
1026
1027 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1028
1029 funcs = plane->helper_private;
1030
1031 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
1032
1033 drm_atomic_helper_check_plane_damage(state, new_plane_state);
1034
1035 if (!funcs || !funcs->atomic_check)
1036 continue;
1037
1038 ret = funcs->atomic_check(plane, state);
1039 if (ret) {
1040 drm_dbg_atomic(plane->dev,
1041 "[PLANE:%d:%s] atomic driver check failed\n",
1042 plane->base.id, plane->name);
1043 return ret;
1044 }
1045 }
1046
1047 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1048 const struct drm_crtc_helper_funcs *funcs;
1049
1050 funcs = crtc->helper_private;
1051
1052 if (!funcs || !funcs->atomic_check)
1053 continue;
1054
1055 ret = funcs->atomic_check(crtc, state);
1056 if (ret) {
1057 drm_dbg_atomic(crtc->dev,
1058 "[CRTC:%d:%s] atomic driver check failed\n",
1059 crtc->base.id, crtc->name);
1060 return ret;
1061 }
1062 }
1063
1064 return ret;
1065 }
1066 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
1067
1068 /**
1069 * drm_atomic_helper_check - validate state object
1070 * @dev: DRM device
1071 * @state: the driver state object
1072 *
1073 * Check the state object to see if the requested state is physically possible.
1074 * Only CRTCs and planes have check callbacks, so for any additional (global)
1075 * checking that a driver needs it can simply wrap that around this function.
1076 * Drivers without such needs can directly use this as their
1077 * &drm_mode_config_funcs.atomic_check callback.
1078 *
1079 * This just wraps the two parts of the state checking for planes and modeset
1080 * state in the default order: First it calls drm_atomic_helper_check_modeset()
1081 * and then drm_atomic_helper_check_planes(). The assumption is that the
1082 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
1083 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
1084 * watermarks.
1085 *
1086 * Note that zpos normalization will add all enabled planes to the state,
1087 * which might not be desired for some drivers.
1088 * For example, enabling or disabling a cursor plane with a fixed zpos value
1089 * would force all other enabled planes into the state change.
1090 *
1091 * IMPORTANT:
1092 *
1093 * As this function calls drm_atomic_helper_check_modeset() internally, its
1094 * restrictions also apply:
1095 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
1096 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
1097 * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
1098 * function again after that change.
1099 *
1100 * RETURNS:
1101 * Zero for success or -errno
1102 */
1103 int drm_atomic_helper_check(struct drm_device *dev,
1104 struct drm_atomic_state *state)
1105 {
1106 int ret;
1107
1108 ret = drm_atomic_helper_check_modeset(dev, state);
1109 if (ret)
1110 return ret;
1111
1112 if (dev->mode_config.normalize_zpos) {
1113 ret = drm_atomic_normalize_zpos(dev, state);
1114 if (ret)
1115 return ret;
1116 }
1117
1118 ret = drm_atomic_helper_check_planes(dev, state);
1119 if (ret)
1120 return ret;
1121
1122 if (state->legacy_cursor_update)
1123 state->async_update = !drm_atomic_helper_async_check(dev, state);
1124
1125 drm_self_refresh_helper_alter_state(state);
1126
1127 return ret;
1128 }
1129 EXPORT_SYMBOL(drm_atomic_helper_check);
1130
1131 static bool
1132 crtc_needs_disable(struct drm_crtc_state *old_state,
1133 struct drm_crtc_state *new_state)
1134 {
1135 /*
1136 * No new_state means the CRTC is off, so the only criterion is whether
1137 * it's currently active or in self refresh mode.
1138 */
1139 if (!new_state)
1140 return drm_atomic_crtc_effectively_active(old_state);
1141
1142 /*
1143 * We need to disable bridge(s) and CRTC if we're transitioning out of
1144 * self-refresh and changing CRTCs at the same time, because the
1145 * bridge tracks self-refresh status via CRTC state.
1146 */
1147 if (old_state->self_refresh_active &&
1148 old_state->crtc != new_state->crtc)
1149 return true;
1150
1151 /*
1152 * We also need to run through the crtc_funcs->disable() function if
1153 * the CRTC is currently on, if it's transitioning to self refresh
1154 * mode, or if it's in self refresh mode and needs to be fully
1155 * disabled.
1156 */
1157 return old_state->active ||
1158 (old_state->self_refresh_active && !new_state->active) ||
1159 new_state->self_refresh_active;
1160 }
1161
1162 static void
1163 disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
1164 {
1165 struct drm_connector *connector;
1166 struct drm_connector_state *old_conn_state, *new_conn_state;
1167 struct drm_crtc *crtc;
1168 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1169 int i;
1170
1171 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1172 const struct drm_encoder_helper_funcs *funcs;
1173 struct drm_encoder *encoder;
1174 struct drm_bridge *bridge;
1175
1176 /*
1177 * Shut down everything that's in the changeset and currently
1178 * still on, so we need to check the old, saved state.
1179 */
1180 if (!old_conn_state->crtc)
1181 continue;
1182
1183 old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
1184
1185 if (new_conn_state->crtc)
1186 new_crtc_state = drm_atomic_get_new_crtc_state(
1187 state,
1188 new_conn_state->crtc);
1189 else
1190 new_crtc_state = NULL;
1191
1192 if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1193 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1194 continue;
1195
1196 encoder = old_conn_state->best_encoder;
1197
1198 /* We shouldn't get this far if we didn't previously have
1199 * an encoder, but WARN_ON() rather than explode.
1200 */
1201 if (WARN_ON(!encoder))
1202 continue;
1203
1204 funcs = encoder->helper_private;
1205
1206 drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
1207 encoder->base.id, encoder->name);
1208
1209 /*
1210 * Each encoder has at most one connector (since we always steal
1211 * it away), so we won't call disable hooks twice.
1212 */
1213 bridge = drm_bridge_chain_get_first_bridge(encoder);
1214 drm_atomic_bridge_chain_disable(bridge, state);
1215
1216 /* Right function depends upon target state. */
1217 if (funcs) {
1218 if (funcs->atomic_disable)
1219 funcs->atomic_disable(encoder, state);
1220 else if (new_conn_state->crtc && funcs->prepare)
1221 funcs->prepare(encoder);
1222 else if (funcs->disable)
1223 funcs->disable(encoder);
1224 else if (funcs->dpms)
1225 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1226 }
1227
1228 drm_atomic_bridge_chain_post_disable(bridge, state);
1229 }
1230
1231 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1232 const struct drm_crtc_helper_funcs *funcs;
1233 int ret;
1234
1235 /* Shut down everything that needs a full modeset. */
1236 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1237 continue;
1238
1239 if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1240 continue;
1241
1242 funcs = crtc->helper_private;
1243
1244 drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
1245 crtc->base.id, crtc->name);
1246
1247
1248 /* Right function depends upon target state. */
1249 if (new_crtc_state->enable && funcs->prepare)
1250 funcs->prepare(crtc);
1251 else if (funcs->atomic_disable)
1252 funcs->atomic_disable(crtc, state);
1253 else if (funcs->disable)
1254 funcs->disable(crtc);
1255 else if (funcs->dpms)
1256 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1257
1258 if (!drm_dev_has_vblank(dev))
1259 continue;
1260
1261 ret = drm_crtc_vblank_get(crtc);
1262 /*
1263 * Self-refresh is not a true "disable"; ensure vblank remains
1264 * enabled.
1265 */
1266 if (new_crtc_state->self_refresh_active)
1267 WARN_ONCE(ret != 0,
1268 "driver disabled vblank in self-refresh\n");
1269 else
1270 WARN_ONCE(ret != -EINVAL,
1271 "driver forgot to call drm_crtc_vblank_off()\n");
1272 if (ret == 0)
1273 drm_crtc_vblank_put(crtc);
1274 }
1275 }
1276
1277 /**
1278 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1279 * @dev: DRM device
1280 * @state: atomic state object being committed
1281 *
1282 * This function updates all the various legacy modeset state pointers in
1283 * connectors, encoders and CRTCs.
1284 *
1285 * Drivers can use this for building their own atomic commit if they don't have
1286 * a pure helper-based modeset implementation.
1287 *
1288 * Since these updates are not synchronized with locking, only code paths
1289 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1290 * legacy state filled out by this helper. De facto this means this helper and
1291 * the legacy state pointers are only really useful for transitioning an
1292 * existing driver to the atomic world.
1293 */
1294 void
1295 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1296 struct drm_atomic_state *state)
1297 {
1298 struct drm_connector *connector;
1299 struct drm_connector_state *old_conn_state, *new_conn_state;
1300 struct drm_crtc *crtc;
1301 struct drm_crtc_state *new_crtc_state;
1302 int i;
1303
1304 /* clear out existing links and update dpms */
1305 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1306 if (connector->encoder) {
1307 WARN_ON(!connector->encoder->crtc);
1308
1309 connector->encoder->crtc = NULL;
1310 connector->encoder = NULL;
1311 }
1312
1313 crtc = new_conn_state->crtc;
1314 if ((!crtc && old_conn_state->crtc) ||
1315 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1316 int mode = DRM_MODE_DPMS_OFF;
1317
1318 if (crtc && crtc->state->active)
1319 mode = DRM_MODE_DPMS_ON;
1320
1321 connector->dpms = mode;
1322 }
1323 }
1324
1325 /* set new links */
1326 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1327 if (!new_conn_state->crtc)
1328 continue;
1329
1330 if (WARN_ON(!new_conn_state->best_encoder))
1331 continue;
1332
1333 connector->encoder = new_conn_state->best_encoder;
1334 connector->encoder->crtc = new_conn_state->crtc;
1335 }
1336
1337 /* set legacy state in the crtc structure */
1338 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1339 struct drm_plane *primary = crtc->primary;
1340 struct drm_plane_state *new_plane_state;
1341
1342 crtc->mode = new_crtc_state->mode;
1343 crtc->enabled = new_crtc_state->enable;
1344
1345 new_plane_state =
1346 drm_atomic_get_new_plane_state(state, primary);
1347
1348 if (new_plane_state && new_plane_state->crtc == crtc) {
1349 crtc->x = new_plane_state->src_x >> 16;
1350 crtc->y = new_plane_state->src_y >> 16;
1351 }
1352 }
1353 }
1354 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1355
1356 /**
1357 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1358 * @state: atomic state object
1359 *
1360 * Updates the timestamping constants used for precise vblank timestamps
1361 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1362 */
1363 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1364 {
1365 struct drm_crtc_state *new_crtc_state;
1366 struct drm_crtc *crtc;
1367 int i;
1368
1369 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1370 if (new_crtc_state->enable)
1371 drm_calc_timestamping_constants(crtc,
1372 &new_crtc_state->adjusted_mode);
1373 }
1374 }
1375 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1376
1377 static void
1378 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
1379 {
1380 struct drm_crtc *crtc;
1381 struct drm_crtc_state *new_crtc_state;
1382 struct drm_connector *connector;
1383 struct drm_connector_state *new_conn_state;
1384 int i;
1385
1386 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1387 const struct drm_crtc_helper_funcs *funcs;
1388
1389 if (!new_crtc_state->mode_changed)
1390 continue;
1391
1392 funcs = crtc->helper_private;
1393
1394 if (new_crtc_state->enable && funcs->mode_set_nofb) {
1395 drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
1396 crtc->base.id, crtc->name);
1397
1398 funcs->mode_set_nofb(crtc);
1399 }
1400 }
1401
1402 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1403 const struct drm_encoder_helper_funcs *funcs;
1404 struct drm_encoder *encoder;
1405 struct drm_display_mode *mode, *adjusted_mode;
1406 struct drm_bridge *bridge;
1407
1408 if (!new_conn_state->best_encoder)
1409 continue;
1410
1411 encoder = new_conn_state->best_encoder;
1412 funcs = encoder->helper_private;
1413 new_crtc_state = new_conn_state->crtc->state;
1414 mode = &new_crtc_state->mode;
1415 adjusted_mode = &new_crtc_state->adjusted_mode;
1416
1417 if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
1418 continue;
1419
1420 drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
1421 encoder->base.id, encoder->name);
1422
1423 /*
1424 * Each encoder has at most one connector (since we always steal
1425 * it away), so we won't call mode_set hooks twice.
1426 */
1427 if (funcs && funcs->atomic_mode_set) {
1428 funcs->atomic_mode_set(encoder, new_crtc_state,
1429 new_conn_state);
1430 } else if (funcs && funcs->mode_set) {
1431 funcs->mode_set(encoder, mode, adjusted_mode);
1432 }
1433
1434 bridge = drm_bridge_chain_get_first_bridge(encoder);
1435 drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1436 }
1437 }
1438
1439 /**
1440 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1441 * @dev: DRM device
1442 * @state: atomic state object being committed
1443 *
1444 * This function shuts down all the outputs that need to be shut down and
1445 * prepares them (if required) with the new mode.
1446 *
1447 * For compatibility with legacy CRTC helpers this should be called before
1448 * drm_atomic_helper_commit_planes(), which is what the default commit function
1449 * does. But drivers with different needs can group the modeset commits together
1450 * and do the plane commits at the end. This is useful for drivers doing runtime
1451 * PM since plane updates then only happen when the CRTC is actually enabled.
1452 */
1453 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1454 struct drm_atomic_state *state)
1455 {
1456 disable_outputs(dev, state);
1457
1458 drm_atomic_helper_update_legacy_modeset_state(dev, state);
1459 drm_atomic_helper_calc_timestamping_constants(state);
1460
1461 crtc_set_mode(dev, state);
1462 }
1463 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1464
1465 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1466 struct drm_atomic_state *state)
1467 {
1468 struct drm_connector *connector;
1469 struct drm_connector_state *new_conn_state;
1470 int i;
1471
1472 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1473 const struct drm_connector_helper_funcs *funcs;
1474
1475 funcs = connector->helper_private;
1476 if (!funcs->atomic_commit)
1477 continue;
1478
1479 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1480 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1481 funcs->atomic_commit(connector, state);
1482 }
1483 }
1484 }
1485
1486 /**
1487 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1488 * @dev: DRM device
1489 * @state: atomic state object being committed
1490 *
1491 * This function enables all the outputs with the new configuration which had to
1492 * be turned off for the update.
1493 *
1494 * For compatibility with legacy CRTC helpers this should be called after
1495 * drm_atomic_helper_commit_planes(), which is what the default commit function
1496 * does. But drivers with different needs can group the modeset commits together
1497 * and do the plane commits at the end. This is useful for drivers doing runtime
1498 * PM since plane updates then only happen when the CRTC is actually enabled.
1499 */
1500 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1501 struct drm_atomic_state *state)
1502 {
1503 struct drm_crtc *crtc;
1504 struct drm_crtc_state *old_crtc_state;
1505 struct drm_crtc_state *new_crtc_state;
1506 struct drm_connector *connector;
1507 struct drm_connector_state *new_conn_state;
1508 int i;
1509
1510 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1511 const struct drm_crtc_helper_funcs *funcs;
1512
1513 /* Need to filter out CRTCs where only planes change. */
1514 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1515 continue;
1516
1517 if (!new_crtc_state->active)
1518 continue;
1519
1520 funcs = crtc->helper_private;
1521
1522 if (new_crtc_state->enable) {
1523 drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
1524 crtc->base.id, crtc->name);
1525 if (funcs->atomic_enable)
1526 funcs->atomic_enable(crtc, state);
1527 else if (funcs->commit)
1528 funcs->commit(crtc);
1529 }
1530 }
1531
1532 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1533 const struct drm_encoder_helper_funcs *funcs;
1534 struct drm_encoder *encoder;
1535 struct drm_bridge *bridge;
1536
1537 if (!new_conn_state->best_encoder)
1538 continue;
1539
1540 if (!new_conn_state->crtc->state->active ||
1541 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1542 continue;
1543
1544 encoder = new_conn_state->best_encoder;
1545 funcs = encoder->helper_private;
1546
1547 drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
1548 encoder->base.id, encoder->name);
1549
1550 /*
1551 * Each encoder has at most one connector (since we always steal
1552 * it away), so we won't call enable hooks twice.
1553 */
1554 bridge = drm_bridge_chain_get_first_bridge(encoder);
1555 drm_atomic_bridge_chain_pre_enable(bridge, state);
1556
1557 if (funcs) {
1558 if (funcs->atomic_enable)
1559 funcs->atomic_enable(encoder, state);
1560 else if (funcs->enable)
1561 funcs->enable(encoder);
1562 else if (funcs->commit)
1563 funcs->commit(encoder);
1564 }
1565
1566 drm_atomic_bridge_chain_enable(bridge, state);
1567 }
1568
1569 drm_atomic_helper_commit_writebacks(dev, state);
1570 }
1571 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
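/*
 * A hedged sketch of the "group the modeset commits together" ordering
 * mentioned above, as a &drm_mode_config_helper_funcs.atomic_commit_tail
 * implementation for a hypothetical runtime-PM driver "foo" (the stock
 * drm_atomic_helper_commit_tail_rpm() follows the same pattern):
 *
 *     static void foo_atomic_commit_tail(struct drm_atomic_state *state)
 *     {
 *             struct drm_device *dev = state->dev;
 *
 *             drm_atomic_helper_commit_modeset_disables(dev, state);
 *             drm_atomic_helper_commit_modeset_enables(dev, state);
 *             drm_atomic_helper_commit_planes(dev, state,
 *                                             DRM_PLANE_COMMIT_ACTIVE_ONLY);
 *
 *             drm_atomic_helper_fake_vblank(state);
 *             drm_atomic_helper_commit_hw_done(state);
 *             drm_atomic_helper_wait_for_vblanks(dev, state);
 *             drm_atomic_helper_cleanup_planes(dev, state);
 *     }
 */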
1572
1573 /*
1574 * For atomic updates which touch just a single CRTC, calculate the time of the
1575 * next vblank, and inform all the fences of the deadline.
1576 */
1577 static void set_fence_deadline(struct drm_device *dev,
1578 struct drm_atomic_state *state)
1579 {
1580 struct drm_crtc *crtc;
1581 struct drm_crtc_state *new_crtc_state;
1582 struct drm_plane *plane;
1583 struct drm_plane_state *new_plane_state;
1584 ktime_t vbltime = 0;
1585 int i;
1586
1587 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
1588 ktime_t v;
1589
1590 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
1591 continue;
1592
1593 if (!new_crtc_state->active)
1594 continue;
1595
1596 if (drm_crtc_next_vblank_start(crtc, &v))
1597 continue;
1598
1599 if (!vbltime || ktime_before(v, vbltime))
1600 vbltime = v;
1601 }
1602
1603 /* If no CRTCs updated, then nothing to do: */
1604 if (!vbltime)
1605 return;
1606
1607 for_each_new_plane_in_state (state, plane, new_plane_state, i) {
1608 if (!new_plane_state->fence)
1609 continue;
1610 dma_fence_set_deadline(new_plane_state->fence, vbltime);
1611 }
1612 }
1613
1614 /**
1615 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1616 * @dev: DRM device
1617 * @state: atomic state object with old state structures
1618 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1619 * Otherwise @state is the old state.
1620 *
1621 * For implicit sync, drivers should fish the exclusive fence out of the
1622 * incoming fbs and stash it in the drm_plane_state. This is called after
1623 * drm_atomic_helper_swap_state() so it uses the current plane state (and
1624 * just uses the atomic state to find the changed planes).
1625 *
1626 * Note that @pre_swap is needed since the point where we block for fences moves
1627 * around depending upon whether an atomic commit is blocking or
1628 * non-blocking. For non-blocking commit all waiting needs to happen after
1629 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1630 * to wait **before** we do anything that can't be easily rolled back. That is
1631 * before we call drm_atomic_helper_swap_state().
1632 *
1633 * Returns zero on success or < 0 if dma_fence_wait() fails.
1634 */
1635 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1636 struct drm_atomic_state *state,
1637 bool pre_swap)
1638 {
1639 struct drm_plane *plane;
1640 struct drm_plane_state *new_plane_state;
1641 int i, ret;
1642
1643 set_fence_deadline(dev, state);
1644
1645 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1646 if (!new_plane_state->fence)
1647 continue;
1648
1649 WARN_ON(!new_plane_state->fb);
1650
1651 /*
1652 * If waiting for fences pre-swap (i.e. for a blocking commit), userspace
1653 * is still blocked in the ioctl and can interrupt the operation. Instead
1654 * of blocking until the timer expires, make the wait interruptible.
1655 */
1656 ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1657 if (ret)
1658 return ret;
1659
1660 dma_fence_put(new_plane_state->fence);
1661 new_plane_state->fence = NULL;
1662 }
1663
1664 return 0;
1665 }
1666 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
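
/*
 * Example (illustrative sketch only, not part of this helper library): a
 * GEM-based driver can satisfy the "stash the fence in the plane state"
 * requirement described above by routing its &drm_plane_helper_funcs.prepare_fb
 * hook through drm_gem_plane_helper_prepare_fb(). The foo_ prefix marks a
 * hypothetical driver.
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *new_state)
 *	{
 *		// Fishes the implicit fence out of new_state->fb and stashes it
 *		// in new_state->fence for drm_atomic_helper_wait_for_fences().
 *		return drm_gem_plane_helper_prepare_fb(plane, new_state);
 *	}
 *
 * Leaving prepare_fb NULL achieves the same result for GEM drivers, since
 * drm_atomic_helper_prepare_planes() falls back to this GEM helper.
 */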
1667
1668 /**
1669 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1670 * @dev: DRM device
1671 * @state: atomic state object being committed
1672 *
1673 * Helper to, after atomic commit, wait for vblanks on all affected
1674 * CRTCs (ie. before cleaning up old framebuffers using
1675 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1676 * framebuffers have actually changed, in order to optimize for the legacy
1677 * cursor and plane update use-case.
1678 *
1679 * Drivers using the nonblocking commit tracking support initialized by calling
1680 * drm_atomic_helper_setup_commit() should look at
1681 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1682 */
1683 void
1684 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1685 struct drm_atomic_state *state)
1686 {
1687 struct drm_crtc *crtc;
1688 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1689 int i, ret;
1690 unsigned int crtc_mask = 0;
1691
1692 /*
1693 * Legacy cursor ioctls are completely unsynced, and userspace
1694 * relies on that (by doing tons of cursor updates).
1695 */
1696 if (state->legacy_cursor_update)
1697 return;
1698
1699 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1700 if (!new_crtc_state->active)
1701 continue;
1702
1703 ret = drm_crtc_vblank_get(crtc);
1704 if (ret != 0)
1705 continue;
1706
1707 crtc_mask |= drm_crtc_mask(crtc);
1708 state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1709 }
1710
1711 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
1712 if (!(crtc_mask & drm_crtc_mask(crtc)))
1713 continue;
1714
1715 ret = wait_event_timeout(dev->vblank[i].queue,
1716 state->crtcs[i].last_vblank_count !=
1717 drm_crtc_vblank_count(crtc),
1718 msecs_to_jiffies(100));
1719
1720 WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1721 crtc->base.id, crtc->name);
1722
1723 drm_crtc_vblank_put(crtc);
1724 }
1725 }
1726 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1727
1728 /**
1729 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1730 * @dev: DRM device
1731 * @state: atomic state object being committed
1732 *
1733 * Helper to, after atomic commit, wait for page flips on all affected
1734 * crtcs (ie. before cleaning up old framebuffers using
1735 * drm_atomic_helper_cleanup_planes()). Compared to
1736 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1737 * CRTCs, assuming that cursor-only updates are signalling their completion
1738 * immediately (or using a different path).
1739 *
1740 * This requires that drivers use the nonblocking commit tracking support
1741 * initialized using drm_atomic_helper_setup_commit().
1742 */
1743 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1744 struct drm_atomic_state *state)
1745 {
1746 struct drm_crtc *crtc;
1747 int i;
1748
1749 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1750 struct drm_crtc_commit *commit = state->crtcs[i].commit;
1751 int ret;
1752
1753 crtc = state->crtcs[i].ptr;
1754
1755 if (!crtc || !commit)
1756 continue;
1757
1758 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1759 if (ret == 0)
1760 drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
1761 crtc->base.id, crtc->name);
1762 }
1763
1764 if (state->fake_commit)
1765 complete_all(&state->fake_commit->flip_done);
1766 }
1767 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
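
/*
 * Example (illustrative sketch): a driver that uses the nonblocking commit
 * tracking from drm_atomic_helper_setup_commit() can wait on flip completion
 * instead of vblanks in its custom commit tail. The foo_ name is hypothetical.
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		drm_atomic_helper_commit_planes(dev, state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *
 *		drm_atomic_helper_fake_vblank(state);
 *		drm_atomic_helper_commit_hw_done(state);
 *		drm_atomic_helper_wait_for_flip_done(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}
 */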
1768
1769 /**
1770 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1771 * @state: atomic state object being committed
1772 *
1773 * This is the default implementation for the
1774 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1775 * that do not support runtime_pm or do not need the CRTC to be
1776 * enabled to perform a commit. Otherwise, see
1777 * drm_atomic_helper_commit_tail_rpm().
1778 *
1779 * Note that the default ordering of the various stages is chosen to match the
1780 * legacy modeset helper library as closely as possible.
1781 */
1782 void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1783 {
1784 struct drm_device *dev = state->dev;
1785
1786 drm_atomic_helper_commit_modeset_disables(dev, state);
1787
1788 drm_atomic_helper_commit_planes(dev, state, 0);
1789
1790 drm_atomic_helper_commit_modeset_enables(dev, state);
1791
1792 drm_atomic_helper_fake_vblank(state);
1793
1794 drm_atomic_helper_commit_hw_done(state);
1795
1796 drm_atomic_helper_wait_for_vblanks(dev, state);
1797
1798 drm_atomic_helper_cleanup_planes(dev, state);
1799 }
1800 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1801
1802 /**
1803 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1804 * @state: new modeset state to be committed
1805 *
1806 * This is an alternative implementation for the
1807 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1808 * that support runtime_pm or need the CRTC to be enabled to perform a
1809 * commit. Otherwise, one should use the default implementation
1810 * drm_atomic_helper_commit_tail().
1811 */
1812 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
1813 {
1814 struct drm_device *dev = state->dev;
1815
1816 drm_atomic_helper_commit_modeset_disables(dev, state);
1817
1818 drm_atomic_helper_commit_modeset_enables(dev, state);
1819
1820 drm_atomic_helper_commit_planes(dev, state,
1821 DRM_PLANE_COMMIT_ACTIVE_ONLY);
1822
1823 drm_atomic_helper_fake_vblank(state);
1824
1825 drm_atomic_helper_commit_hw_done(state);
1826
1827 drm_atomic_helper_wait_for_vblanks(dev, state);
1828
1829 drm_atomic_helper_cleanup_planes(dev, state);
1830 }
1831 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
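
/*
 * Example (illustrative sketch): plugging the rpm variant in as the driver's
 * commit tail, which is usually all a runtime-PM based driver needs. The foo_
 * name is hypothetical.
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 * and during driver initialization:
 *
 *	dev->mode_config.helper_private = &foo_mode_config_helpers;
 */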
1832
1833 static void commit_tail(struct drm_atomic_state *state)
1834 {
1835 struct drm_device *dev = state->dev;
1836 const struct drm_mode_config_helper_funcs *funcs;
1837 struct drm_crtc_state *new_crtc_state;
1838 struct drm_crtc *crtc;
1839 ktime_t start;
1840 s64 commit_time_ms;
1841 unsigned int i, new_self_refresh_mask = 0;
1842
1843 funcs = dev->mode_config.helper_private;
1844
1845 /*
1846 * We're measuring the _entire_ commit, so the time will vary depending
1847 * on how many fences and objects are involved. For the purposes of self
1848 * refresh, this is desirable since it'll give us an idea of how
1849 * congested things are. This will inform our decision on how often we
1850 * should enter self refresh after idle.
1851 *
1852 * These times will be averaged out in the self refresh helpers to avoid
1853 * overreacting over one outlier frame
1854 */
1855 start = ktime_get();
1856
1857 drm_atomic_helper_wait_for_fences(dev, state, false);
1858
1859 drm_atomic_helper_wait_for_dependencies(state);
1860
1861 /*
1862 * We cannot safely access new_crtc_state after
1863 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
1864 * self-refresh active beforehand:
1865 */
1866 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
1867 if (new_crtc_state->self_refresh_active)
1868 new_self_refresh_mask |= BIT(i);
1869
1870 if (funcs && funcs->atomic_commit_tail)
1871 funcs->atomic_commit_tail(state);
1872 else
1873 drm_atomic_helper_commit_tail(state);
1874
1875 commit_time_ms = ktime_ms_delta(ktime_get(), start);
1876 if (commit_time_ms > 0)
1877 drm_self_refresh_helper_update_avg_times(state,
1878 (unsigned long)commit_time_ms,
1879 new_self_refresh_mask);
1880
1881 drm_atomic_helper_commit_cleanup_done(state);
1882
1883 drm_atomic_state_put(state);
1884 }
1885
1886 static void commit_work(struct work_struct *work)
1887 {
1888 struct drm_atomic_state *state = container_of(work,
1889 struct drm_atomic_state,
1890 commit_work);
1891 commit_tail(state);
1892 }
1893
1894 /**
1895 * drm_atomic_helper_async_check - check if state can be committed asynchronously
1896 * @dev: DRM device
1897 * @state: the driver state object
1898 *
1899 * This helper will check if it is possible to commit the state asynchronously.
1900 * Async commits are not supposed to swap the states like normal sync commits
1901 * but just do in-place changes on the current state.
1902 *
1903 * It will return 0 if the commit can happen in an asynchronous fashion, or an
1904 * error code if not. Note that an error just means it can't be committed
1905 * asynchronously; in that case the commit should be treated like a normal synchronous commit.
1906 */
1907 int drm_atomic_helper_async_check(struct drm_device *dev,
1908 struct drm_atomic_state *state)
1909 {
1910 struct drm_crtc *crtc;
1911 struct drm_crtc_state *crtc_state;
1912 struct drm_plane *plane = NULL;
1913 struct drm_plane_state *old_plane_state = NULL;
1914 struct drm_plane_state *new_plane_state = NULL;
1915 const struct drm_plane_helper_funcs *funcs;
1916 int i, ret, n_planes = 0;
1917
1918 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1919 if (drm_atomic_crtc_needs_modeset(crtc_state))
1920 return -EINVAL;
1921 }
1922
1923 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1924 n_planes++;
1925
1926 /* FIXME: we support only single plane updates for now */
1927 if (n_planes != 1) {
1928 drm_dbg_atomic(dev,
1929 "only single plane async updates are supported\n");
1930 return -EINVAL;
1931 }
1932
1933 if (!new_plane_state->crtc ||
1934 old_plane_state->crtc != new_plane_state->crtc) {
1935 drm_dbg_atomic(dev,
1936 "[PLANE:%d:%s] async update cannot change CRTC\n",
1937 plane->base.id, plane->name);
1938 return -EINVAL;
1939 }
1940
1941 funcs = plane->helper_private;
1942 if (!funcs->atomic_async_update) {
1943 drm_dbg_atomic(dev,
1944 "[PLANE:%d:%s] driver does not support async updates\n",
1945 plane->base.id, plane->name);
1946 return -EINVAL;
1947 }
1948
1949 if (new_plane_state->fence) {
1950 drm_dbg_atomic(dev,
1951 "[PLANE:%d:%s] missing fence for async update\n",
1952 plane->base.id, plane->name);
1953 return -EINVAL;
1954 }
1955
1956 /*
1957 * Don't do an async update if there is an outstanding commit modifying
1958 * the plane. This prevents our async update's changes from getting
1959 * overridden by a previous synchronous update's state.
1960 */
1961 if (old_plane_state->commit &&
1962 !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
1963 drm_dbg_atomic(dev,
1964 "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
1965 plane->base.id, plane->name);
1966 return -EBUSY;
1967 }
1968
1969 ret = funcs->atomic_async_check(plane, state, false);
1970 if (ret != 0)
1971 drm_dbg_atomic(dev,
1972 "[PLANE:%d:%s] driver async check failed\n",
1973 plane->base.id, plane->name);
1974 return ret;
1975 }
1976 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1977
1978 /**
1979 * drm_atomic_helper_async_commit - commit state asynchronously
1980 * @dev: DRM device
1981 * @state: the driver state object
1982 *
1983 * This function commits a state asynchronously, i.e., not vblank
1984 * synchronized. It should be used on a state only when
1985 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1986 * the states like normal sync commits, but just do in-place changes on the
1987 * current state.
1988 *
1989 * TODO: Implement full swap instead of doing in-place changes.
1990 */
1991 void drm_atomic_helper_async_commit(struct drm_device *dev,
1992 struct drm_atomic_state *state)
1993 {
1994 struct drm_plane *plane;
1995 struct drm_plane_state *plane_state;
1996 const struct drm_plane_helper_funcs *funcs;
1997 int i;
1998
1999 for_each_new_plane_in_state(state, plane, plane_state, i) {
2000 struct drm_framebuffer *new_fb = plane_state->fb;
2001 struct drm_framebuffer *old_fb = plane->state->fb;
2002
2003 funcs = plane->helper_private;
2004 funcs->atomic_async_update(plane, state);
2005
2006 /*
2007 * ->atomic_async_update() is supposed to update the
2008 * plane->state in-place, make sure at least common
2009 * properties have been properly updated.
2010 */
2011 WARN_ON_ONCE(plane->state->fb != new_fb);
2012 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
2013 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
2014 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
2015 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
2016
2017 /*
2018 * Make sure the FBs have been swapped so that cleanup of the
2019 * new_state actually cleans up the old FB.
2020 */
2021 WARN_ON_ONCE(plane_state->fb != old_fb);
2022 }
2023 }
2024 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
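
/*
 * Example (illustrative sketch): async plane updates only work when the plane's
 * helper funcs provide the async hooks checked by
 * drm_atomic_helper_async_check(). A driver wires them up next to the regular
 * atomic hooks; the foo_ names are hypothetical driver functions whose
 * implementations must update plane->state in place as described above.
 *
 *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *		.atomic_check		= foo_plane_atomic_check,
 *		.atomic_update		= foo_plane_atomic_update,
 *		.atomic_async_check	= foo_plane_atomic_async_check,
 *		.atomic_async_update	= foo_plane_atomic_async_update,
 *	};
 */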
2025
2026 /**
2027 * drm_atomic_helper_commit - commit validated state object
2028 * @dev: DRM device
2029 * @state: the driver state object
2030 * @nonblock: whether nonblocking behavior is requested.
2031 *
2032 * This function commits a state object that has been pre-validated with
2033 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. This
2034 * function implements nonblocking commits, using
2035 * drm_atomic_helper_setup_commit() and related functions.
2036 *
2037 * Committing the actual hardware state is done through the
2038 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
2039 * implementation drm_atomic_helper_commit_tail().
2040 *
2041 * RETURNS:
2042 * Zero for success or -errno.
2043 */
2044 int drm_atomic_helper_commit(struct drm_device *dev,
2045 struct drm_atomic_state *state,
2046 bool nonblock)
2047 {
2048 int ret;
2049
2050 if (state->async_update) {
2051 ret = drm_atomic_helper_prepare_planes(dev, state);
2052 if (ret)
2053 return ret;
2054
2055 drm_atomic_helper_async_commit(dev, state);
2056 drm_atomic_helper_unprepare_planes(dev, state);
2057
2058 return 0;
2059 }
2060
2061 ret = drm_atomic_helper_setup_commit(state, nonblock);
2062 if (ret)
2063 return ret;
2064
2065 INIT_WORK(&state->commit_work, commit_work);
2066
2067 ret = drm_atomic_helper_prepare_planes(dev, state);
2068 if (ret)
2069 return ret;
2070
2071 if (!nonblock) {
2072 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
2073 if (ret)
2074 goto err;
2075 }
2076
2077 /*
2078 * This is the point of no return - everything below never fails except
2079 * when the hw goes bonghits. Which means we can commit the new state on
2080 * the software side now.
2081 */
2082
2083 ret = drm_atomic_helper_swap_state(state, true);
2084 if (ret)
2085 goto err;
2086
2087 /*
2088 * Everything below can be run asynchronously without the need to grab
2089 * any modeset locks at all under one condition: It must be guaranteed
2090 * that the asynchronous work has either been cancelled (if the driver
2091 * supports it, which at least requires that the framebuffers get
2092 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2093 * before the new state gets committed on the software side with
2094 * drm_atomic_helper_swap_state().
2095 *
2096 * This scheme allows new atomic state updates to be prepared and
2097 * checked in parallel to the asynchronous completion of the previous
2098 * update. Which is important since compositors need to figure out the
2099 * composition of the next frame right after having submitted the
2100 * current layout.
2101 *
2102 * NOTE: Commit work has multiple phases, first hardware commit, then
2103 * cleanup. We want them to overlap, hence need system_unbound_wq to
2104 * make sure work items don't artificially stall on each other.
2105 */
2106
2107 drm_atomic_state_get(state);
2108 if (nonblock)
2109 queue_work(system_unbound_wq, &state->commit_work);
2110 else
2111 commit_tail(state);
2112
2113 return 0;
2114
2115 err:
2116 drm_atomic_helper_unprepare_planes(dev, state);
2117 return ret;
2118 }
2119 EXPORT_SYMBOL(drm_atomic_helper_commit);
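
/*
 * Example (illustrative sketch): most atomic drivers can use the check and
 * commit helpers directly in their &drm_mode_config_funcs. The foo_ prefix is
 * hypothetical.
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create	= drm_gem_fb_create,
 *		.atomic_check	= drm_atomic_helper_check,
 *		.atomic_commit	= drm_atomic_helper_commit,
 *	};
 */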
2120
2121 /**
2122 * DOC: implementing nonblocking commit
2123 *
2124 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
2125 * different operations against each other. Locks, especially struct
2126 * &drm_modeset_lock, should not be held in worker threads or any other
2127 * asynchronous context used to commit the hardware state.
2128 *
2129 * drm_atomic_helper_commit() implements the recommended sequence for
2130 * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
2131 *
2132 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
2133 * need to propagate out of memory/VRAM errors to userspace, it must be called
2134 * synchronously.
2135 *
2136 * 2. Synchronize with any outstanding nonblocking commit worker threads which
2137 * might be affected by the new state update. This is handled by
2138 * drm_atomic_helper_setup_commit().
2139 *
2140 * Asynchronous workers need to have sufficient parallelism to be able to run
2141 * different atomic commits on different CRTCs in parallel. The simplest way to
2142 * achieve this is by running them on the &system_unbound_wq work queue. Note
2143 * that drivers are not required to split up atomic commits and run an
2144 * individual commit in parallel - userspace is supposed to do that if it cares.
2145 * But it might be beneficial to do that for modesets, since those necessarily
2146 * must be done as one global operation, and enabling or disabling a CRTC can
2147 * take a long time. But even that is not required.
2148 *
2149 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
2150 * against all CRTCs therein. Therefore for atomic state updates which only flip
2151 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
2152 * in its atomic check code: This would prevent committing of atomic updates to
2153 * multiple CRTCs in parallel. In general, adding additional state structures
2154 * should be avoided as much as possible, because this reduces parallelism in
2155 * (nonblocking) commits, both due to locking and due to commit sequencing
2156 * requirements.
2157 *
2158 * 3. The software state is updated synchronously with
2159 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
2160 * locks means concurrent callers never see inconsistent state. Note that commit
2161 * workers do not hold any locks; their access is only coordinated through
2162 * ordering. If workers would access state only through the pointers in the
2163 * free-standing state objects (currently not the case for any driver) then even
2164 * multiple pending commits could be in-flight at the same time.
2165 *
2166 * 4. Schedule a work item to do all subsequent steps, using the split-out
2167 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
2168 * then cleaning up the framebuffers after the old framebuffer is no longer
2169 * being displayed. The scheduled work should synchronize against other workers
2170 * using the &drm_crtc_commit infrastructure as needed. See
2171 * drm_atomic_helper_setup_commit() for more details.
2172 */
2173
2174 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
2175 {
2176 struct drm_crtc_commit *commit, *stall_commit = NULL;
2177 bool completed = true;
2178 int i;
2179 long ret = 0;
2180
2181 spin_lock(&crtc->commit_lock);
2182 i = 0;
2183 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
2184 if (i == 0) {
2185 completed = try_wait_for_completion(&commit->flip_done);
2186 /*
2187 * Userspace is not allowed to get ahead of the previous
2188 * commit with nonblocking ones.
2189 */
2190 if (!completed && nonblock) {
2191 spin_unlock(&crtc->commit_lock);
2192 drm_dbg_atomic(crtc->dev,
2193 "[CRTC:%d:%s] busy with a previous commit\n",
2194 crtc->base.id, crtc->name);
2195
2196 return -EBUSY;
2197 }
2198 } else if (i == 1) {
2199 stall_commit = drm_crtc_commit_get(commit);
2200 break;
2201 }
2202
2203 i++;
2204 }
2205 spin_unlock(&crtc->commit_lock);
2206
2207 if (!stall_commit)
2208 return 0;
2209
2210 /* We don't want to let commits get ahead of cleanup work too much,
2211 * stalling on the 2nd-previous commit means triple-buffering never stalls.
2212 */
2213 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
2214 10*HZ);
2215 if (ret == 0)
2216 drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
2217 crtc->base.id, crtc->name);
2218
2219 drm_crtc_commit_put(stall_commit);
2220
2221 return ret < 0 ? ret : 0;
2222 }
2223
2224 static void release_crtc_commit(struct completion *completion)
2225 {
2226 struct drm_crtc_commit *commit = container_of(completion,
2227 typeof(*commit),
2228 flip_done);
2229
2230 drm_crtc_commit_put(commit);
2231 }
2232
2233 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2234 {
2235 init_completion(&commit->flip_done);
2236 init_completion(&commit->hw_done);
2237 init_completion(&commit->cleanup_done);
2238 INIT_LIST_HEAD(&commit->commit_entry);
2239 kref_init(&commit->ref);
2240 commit->crtc = crtc;
2241 }
2242
2243 static struct drm_crtc_commit *
2244 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2245 {
2246 if (crtc) {
2247 struct drm_crtc_state *new_crtc_state;
2248
2249 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2250
2251 return new_crtc_state->commit;
2252 }
2253
2254 if (!state->fake_commit) {
2255 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2256 if (!state->fake_commit)
2257 return NULL;
2258
2259 init_commit(state->fake_commit, NULL);
2260 }
2261
2262 return state->fake_commit;
2263 }
2264
2265 /**
2266 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2267 * @state: new modeset state to be committed
2268 * @nonblock: whether nonblocking behavior is requested.
2269 *
2270 * This function prepares @state to be used by the atomic helper's support for
2271 * nonblocking commits. Drivers using the nonblocking commit infrastructure
2272 * should always call this function from their
2273 * &drm_mode_config_funcs.atomic_commit hook.
2274 *
2275 * Drivers that need to extend the commit setup to private objects can use the
2276 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2277 *
2278 * To be able to use this support drivers need to use a few more helper
2279 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2280 * actually committing the hardware state, and for nonblocking commits this call
2281 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2282 * and its stall parameter, for when a driver's commit hooks look at the
2283 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2284 *
2285 * Completion of the hardware commit step must be signalled using
2286 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2287 * to read or change any permanent software or hardware modeset state. The only
2288 * exception is state protected by other means than &drm_modeset_lock locks.
2289 * Only the free standing @state with pointers to the old state structures can
2290 * be inspected, e.g. to clean up old buffers using
2291 * drm_atomic_helper_cleanup_planes().
2292 *
2293 * At the very end, before cleaning up @state drivers must call
2294 * drm_atomic_helper_commit_cleanup_done().
2295 *
2296 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
2297 * complete and easy-to-use default implementation of the atomic_commit() hook.
2298 *
2299 * The tracking of asynchronously executed and still pending commits is done
2300 * using the core structure &drm_crtc_commit.
2301 *
2302 * By default there's no need to clean up resources allocated by this function
2303 * explicitly: drm_atomic_state_default_clear() will take care of that
2304 * automatically.
2305 *
2306 * Returns:
2307 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2308 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2309 */
2310 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2311 bool nonblock)
2312 {
2313 struct drm_crtc *crtc;
2314 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2315 struct drm_connector *conn;
2316 struct drm_connector_state *old_conn_state, *new_conn_state;
2317 struct drm_plane *plane;
2318 struct drm_plane_state *old_plane_state, *new_plane_state;
2319 struct drm_crtc_commit *commit;
2320 const struct drm_mode_config_helper_funcs *funcs;
2321 int i, ret;
2322
2323 funcs = state->dev->mode_config.helper_private;
2324
2325 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2326 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2327 if (!commit)
2328 return -ENOMEM;
2329
2330 init_commit(commit, crtc);
2331
2332 new_crtc_state->commit = commit;
2333
2334 ret = stall_checks(crtc, nonblock);
2335 if (ret)
2336 return ret;
2337
2338 /*
2339 * Drivers only send out events when at least either current or
2340 * new CRTC state is active. Complete right away if everything
2341 * stays off.
2342 */
2343 if (!old_crtc_state->active && !new_crtc_state->active) {
2344 complete_all(&commit->flip_done);
2345 continue;
2346 }
2347
2348 /* Legacy cursor updates are fully unsynced. */
2349 if (state->legacy_cursor_update) {
2350 complete_all(&commit->flip_done);
2351 continue;
2352 }
2353
2354 if (!new_crtc_state->event) {
2355 commit->event = kzalloc(sizeof(*commit->event),
2356 GFP_KERNEL);
2357 if (!commit->event)
2358 return -ENOMEM;
2359
2360 new_crtc_state->event = commit->event;
2361 }
2362
2363 new_crtc_state->event->base.completion = &commit->flip_done;
2364 new_crtc_state->event->base.completion_release = release_crtc_commit;
2365 drm_crtc_commit_get(commit);
2366
2367 commit->abort_completion = true;
2368
2369 state->crtcs[i].commit = commit;
2370 drm_crtc_commit_get(commit);
2371 }
2372
2373 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2374 /*
2375 * Userspace is not allowed to get ahead of the previous
2376 * commit with nonblocking ones.
2377 */
2378 if (nonblock && old_conn_state->commit &&
2379 !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2380 drm_dbg_atomic(conn->dev,
2381 "[CONNECTOR:%d:%s] busy with a previous commit\n",
2382 conn->base.id, conn->name);
2383
2384 return -EBUSY;
2385 }
2386
2387 /* Always track connectors explicitly for e.g. link retraining. */
2388 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2389 if (!commit)
2390 return -ENOMEM;
2391
2392 new_conn_state->commit = drm_crtc_commit_get(commit);
2393 }
2394
2395 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2396 /*
2397 * Userspace is not allowed to get ahead of the previous
2398 * commit with nonblocking ones.
2399 */
2400 if (nonblock && old_plane_state->commit &&
2401 !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2402 drm_dbg_atomic(plane->dev,
2403 "[PLANE:%d:%s] busy with a previous commit\n",
2404 plane->base.id, plane->name);
2405
2406 return -EBUSY;
2407 }
2408
2409 /* Always track planes explicitly for async pageflip support. */
2410 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2411 if (!commit)
2412 return -ENOMEM;
2413
2414 new_plane_state->commit = drm_crtc_commit_get(commit);
2415 }
2416
2417 if (funcs && funcs->atomic_commit_setup)
2418 return funcs->atomic_commit_setup(state);
2419
2420 return 0;
2421 }
2422 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
2423
2424 /**
2425 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2426 * @state: atomic state object being committed
2427 *
2428 * This function waits for all preceding commits that touch the same CRTC as
2429 * @state to both be committed to the hardware (as signalled by
2430 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2431 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2432 *
2433 * This is part of the atomic helper support for nonblocking commits, see
2434 * drm_atomic_helper_setup_commit() for an overview.
2435 */
2436 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
2437 {
2438 struct drm_crtc *crtc;
2439 struct drm_crtc_state *old_crtc_state;
2440 struct drm_plane *plane;
2441 struct drm_plane_state *old_plane_state;
2442 struct drm_connector *conn;
2443 struct drm_connector_state *old_conn_state;
2444 int i;
2445 long ret;
2446
2447 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2448 ret = drm_crtc_commit_wait(old_crtc_state->commit);
2449 if (ret)
2450 drm_err(crtc->dev,
2451 "[CRTC:%d:%s] commit wait timed out\n",
2452 crtc->base.id, crtc->name);
2453 }
2454
2455 for_each_old_connector_in_state(state, conn, old_conn_state, i) {
2456 ret = drm_crtc_commit_wait(old_conn_state->commit);
2457 if (ret)
2458 drm_err(conn->dev,
2459 "[CONNECTOR:%d:%s] commit wait timed out\n",
2460 conn->base.id, conn->name);
2461 }
2462
2463 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2464 ret = drm_crtc_commit_wait(old_plane_state->commit);
2465 if (ret)
2466 drm_err(plane->dev,
2467 "[PLANE:%d:%s] commit wait timed out\n",
2468 plane->base.id, plane->name);
2469 }
2470 }
2471 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2472
2473 /**
2474 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2475 * @state: atomic state object being committed
2476 *
2477 * This function walks all CRTCs and fakes VBLANK events on those with
2478 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2479 * The primary use of this function is writeback connectors working in oneshot
2480 * mode and faking VBLANK events. In this case they only fake the VBLANK event
2481 * when a job is queued, and any change to the pipeline that does not touch the
2482 * connector would lead to timeouts when calling
2483 * drm_atomic_helper_wait_for_vblanks() or
2484 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2485 * connectors, this function can also fake VBLANK events for CRTCs without
2486 * VBLANK interrupt.
2487 *
2488 * This is part of the atomic helper support for nonblocking commits, see
2489 * drm_atomic_helper_setup_commit() for an overview.
2490 */
2491 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
2492 {
2493 struct drm_crtc_state *new_crtc_state;
2494 struct drm_crtc *crtc;
2495 int i;
2496
2497 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2498 unsigned long flags;
2499
2500 if (!new_crtc_state->no_vblank)
2501 continue;
2502
2503 spin_lock_irqsave(&state->dev->event_lock, flags);
2504 if (new_crtc_state->event) {
2505 drm_crtc_send_vblank_event(crtc,
2506 new_crtc_state->event);
2507 new_crtc_state->event = NULL;
2508 }
2509 spin_unlock_irqrestore(&state->dev->event_lock, flags);
2510 }
2511 }
2512 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2513
2514 /**
2515 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2516 * @state: atomic state object being committed
2517 *
2518 * This function is used to signal completion of the hardware commit step. After
2519 * this step the driver is not allowed to read or change any permanent software
2520 * or hardware modeset state. The only exception is state protected by other
2521 * means than &drm_modeset_lock locks.
2522 *
2523 * Drivers should try to postpone any expensive or delayed cleanup work until
2524 * after this function has been called.
2525 *
2526 * This is part of the atomic helper support for nonblocking commits, see
2527 * drm_atomic_helper_setup_commit() for an overview.
2528 */
2529 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
2530 {
2531 struct drm_crtc *crtc;
2532 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2533 struct drm_crtc_commit *commit;
2534 int i;
2535
2536 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2537 commit = new_crtc_state->commit;
2538 if (!commit)
2539 continue;
2540
2541 /*
2542 * copy new_crtc_state->commit to old_crtc_state->commit,
2543 * it's unsafe to touch new_crtc_state after hw_done,
2544 * but we still need to do so in cleanup_done().
2545 */
2546 if (old_crtc_state->commit)
2547 drm_crtc_commit_put(old_crtc_state->commit);
2548
2549 old_crtc_state->commit = drm_crtc_commit_get(commit);
2550
2551 /* backend must have consumed any event by now */
2552 WARN_ON(new_crtc_state->event);
2553 complete_all(&commit->hw_done);
2554 }
2555
2556 if (state->fake_commit) {
2557 complete_all(&state->fake_commit->hw_done);
2558 complete_all(&state->fake_commit->flip_done);
2559 }
2560 }
2561 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2562
2563 /**
2564 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2565 * @state: atomic state object being committed
2566 *
2567 * This signals completion of the atomic update @state, including any
2568 * cleanup work. If used, it must be called right before calling
2569 * drm_atomic_state_put().
2570 *
2571 * This is part of the atomic helper support for nonblocking commits, see
2572 * drm_atomic_helper_setup_commit() for an overview.
2573 */
2574 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
2575 {
2576 struct drm_crtc *crtc;
2577 struct drm_crtc_state *old_crtc_state;
2578 struct drm_crtc_commit *commit;
2579 int i;
2580
2581 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2582 commit = old_crtc_state->commit;
2583 if (WARN_ON(!commit))
2584 continue;
2585
2586 complete_all(&commit->cleanup_done);
2587 WARN_ON(!try_wait_for_completion(&commit->hw_done));
2588
2589 spin_lock(&crtc->commit_lock);
2590 list_del(&commit->commit_entry);
2591 spin_unlock(&crtc->commit_lock);
2592 }
2593
2594 if (state->fake_commit) {
2595 complete_all(&state->fake_commit->cleanup_done);
2596 WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
2597 }
2598 }
2599 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2600
2601 /**
2602 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2603 * @dev: DRM device
2604 * @state: atomic state object with new state structures
2605 *
2606 * This function prepares plane state, specifically framebuffers, for the new
2607 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2608 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2609 * any already successfully prepared framebuffer.
2610 *
2611 * Returns:
2612 * 0 on success, negative error code on failure.
2613 */
2614 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2615 struct drm_atomic_state *state)
2616 {
2617 struct drm_connector *connector;
2618 struct drm_connector_state *new_conn_state;
2619 struct drm_plane *plane;
2620 struct drm_plane_state *new_plane_state;
2621 int ret, i, j;
2622
2623 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2624 if (!new_conn_state->writeback_job)
2625 continue;
2626
2627 ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2628 if (ret < 0)
2629 return ret;
2630 }
2631
2632 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2633 const struct drm_plane_helper_funcs *funcs;
2634
2635 funcs = plane->helper_private;
2636
2637 if (funcs->prepare_fb) {
2638 ret = funcs->prepare_fb(plane, new_plane_state);
2639 if (ret)
2640 goto fail_prepare_fb;
2641 } else {
2642 WARN_ON_ONCE(funcs->cleanup_fb);
2643
2644 if (!drm_core_check_feature(dev, DRIVER_GEM))
2645 continue;
2646
2647 ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
2648 if (ret)
2649 goto fail_prepare_fb;
2650 }
2651 }
2652
2653 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2654 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2655
2656 if (funcs->begin_fb_access) {
2657 ret = funcs->begin_fb_access(plane, new_plane_state);
2658 if (ret)
2659 goto fail_begin_fb_access;
2660 }
2661 }
2662
2663 return 0;
2664
2665 fail_begin_fb_access:
2666 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2667 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2668
2669 if (j >= i)
2670 continue;
2671
2672 if (funcs->end_fb_access)
2673 funcs->end_fb_access(plane, new_plane_state);
2674 }
2675 i = j; /* set i to upper limit to cleanup all planes */
2676 fail_prepare_fb:
2677 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2678 const struct drm_plane_helper_funcs *funcs;
2679
2680 if (j >= i)
2681 continue;
2682
2683 funcs = plane->helper_private;
2684
2685 if (funcs->cleanup_fb)
2686 funcs->cleanup_fb(plane, new_plane_state);
2687 }
2688
2689 return ret;
2690 }
2691 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2692
2693 /**
2694 * drm_atomic_helper_unprepare_planes - release plane resources on aborts
2695 * @dev: DRM device
2696 * @state: atomic state object with old state structures
2697 *
2698 * This function cleans up plane state, specifically framebuffers, from the
2699 * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
2700 * when aborting an atomic commit. For cleaning up after a successful commit
2701 * use drm_atomic_helper_cleanup_planes().
2702 */
2703 void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
2704 struct drm_atomic_state *state)
2705 {
2706 struct drm_plane *plane;
2707 struct drm_plane_state *new_plane_state;
2708 int i;
2709
2710 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2711 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2712
2713 if (funcs->end_fb_access)
2714 funcs->end_fb_access(plane, new_plane_state);
2715 }
2716
2717 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2718 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2719
2720 if (funcs->cleanup_fb)
2721 funcs->cleanup_fb(plane, new_plane_state);
2722 }
2723 }
2724 EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
2725
2726 static bool plane_crtc_active(const struct drm_plane_state *state)
2727 {
2728 return state->crtc && state->crtc->state->active;
2729 }
2730
2731 /**
2732 * drm_atomic_helper_commit_planes - commit plane state
2733 * @dev: DRM device
2734 * @state: atomic state object being committed
2735 * @flags: flags for committing plane state
2736 *
2737 * This function commits the new plane state using the plane and atomic helper
2738 * functions for planes and CRTCs. It assumes that the atomic state has already
2739 * been pushed into the relevant object state pointers, since this step can no
2740 * longer fail.
2741 *
2742 * It still requires the global state object @state to know which planes and
2743 * crtcs need to be updated though.
2744 *
2745 * Note that this function does all plane updates across all CRTCs in one step.
2746 * If the hardware can't support this approach look at
2747 * drm_atomic_helper_commit_planes_on_crtc() instead.
2748 *
2749 * Plane parameters can be updated by applications while the associated CRTC is
2750 * disabled. The DRM/KMS core will store the parameters in the plane state,
2751 * which will be available to the driver when the CRTC is turned on. As a result
2752 * most drivers don't need to be immediately notified of plane updates for a
2753 * disabled CRTC.
2754 *
2755 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2756 * @flags in order not to receive plane update notifications related to a
2757 * disabled CRTC. This avoids the need to manually ignore plane updates in
2758 * driver code when the driver and/or hardware can't or just don't need to deal
2759 * with updates on disabled CRTCs, for example when supporting runtime PM.
2760 *
2761 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2762 * display controllers require a CRTC's planes to be disabled when the CRTC is
2763 * disabled. This function then skips the &drm_plane_helper_funcs.atomic_disable
2764 * call for a plane if the CRTC of the old plane state needs a modesetting
2765 * operation. Of course, the drivers need to disable the planes in their CRTC
2766 * disable callbacks since no one else would do that.
2767 *
2768 * The drm_atomic_helper_commit() default implementation doesn't set the
2769 * ACTIVE_ONLY flag, in order to most closely match the behaviour of the legacy helpers.
2770 * This should not be copied blindly by drivers.
2771 */
2772 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2773 struct drm_atomic_state *state,
2774 uint32_t flags)
2775 {
2776 struct drm_crtc *crtc;
2777 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2778 struct drm_plane *plane;
2779 struct drm_plane_state *old_plane_state, *new_plane_state;
2780 int i;
2781 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2782 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2783
2784 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2785 const struct drm_crtc_helper_funcs *funcs;
2786
2787 funcs = crtc->helper_private;
2788
2789 if (!funcs || !funcs->atomic_begin)
2790 continue;
2791
2792 if (active_only && !new_crtc_state->active)
2793 continue;
2794
2795 funcs->atomic_begin(crtc, state);
2796 }
2797
2798 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2799 const struct drm_plane_helper_funcs *funcs;
2800 bool disabling;
2801
2802 funcs = plane->helper_private;
2803
2804 if (!funcs)
2805 continue;
2806
2807 disabling = drm_atomic_plane_disabling(old_plane_state,
2808 new_plane_state);
2809
2810 if (active_only) {
2811 /*
2812 * Skip planes related to inactive CRTCs. If the plane
2813 * is enabled use the state of the current CRTC. If the
2814 * plane is being disabled use the state of the old
2815 * CRTC to avoid skipping planes being disabled on an
2816 * active CRTC.
2817 */
2818 if (!disabling && !plane_crtc_active(new_plane_state))
2819 continue;
2820 if (disabling && !plane_crtc_active(old_plane_state))
2821 continue;
2822 }
2823
2824 /*
2825 * Special-case disabling the plane if drivers support it.
2826 */
2827 if (disabling && funcs->atomic_disable) {
2828 struct drm_crtc_state *crtc_state;
2829
2830 crtc_state = old_plane_state->crtc->state;
2831
2832 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2833 no_disable)
2834 continue;
2835
2836 funcs->atomic_disable(plane, state);
2837 } else if (new_plane_state->crtc || disabling) {
2838 funcs->atomic_update(plane, state);
2839
2840 if (!disabling && funcs->atomic_enable) {
2841 if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
2842 funcs->atomic_enable(plane, state);
2843 }
2844 }
2845 }
2846
2847 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2848 const struct drm_crtc_helper_funcs *funcs;
2849
2850 funcs = crtc->helper_private;
2851
2852 if (!funcs || !funcs->atomic_flush)
2853 continue;
2854
2855 if (active_only && !new_crtc_state->active)
2856 continue;
2857
2858 funcs->atomic_flush(crtc, state);
2859 }
2860
2861 /*
2862 * Signal end of framebuffer access here before hw_done. After hw_done,
2863 * a later commit might have already released the plane state.
2864 */
2865 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2866 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2867
2868 if (funcs->end_fb_access)
2869 funcs->end_fb_access(plane, old_plane_state);
2870 }
2871 }
2872 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2873
2874 /**
2875 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2876 * @old_crtc_state: atomic state object with the old CRTC state
2877 *
2878 * This function commits the new plane state using the plane and atomic helper
2879 * functions for planes on the specific CRTC. It assumes that the atomic state
2880 * has already been pushed into the relevant object state pointers, since this
2881 * step can no longer fail.
2882 *
2883 * This function is useful when plane updates should be done CRTC-by-CRTC
2884 * instead of one global step like drm_atomic_helper_commit_planes() does.
2885 *
2886 * This function can only be safely used when planes are not allowed to move
2887 * between different CRTCs, because this function doesn't handle inter-CRTC
2888 * dependencies. Callers need to ensure that either no such dependencies exist,
2889 * or that they are resolved through ordering of commit calls or some other means.
2890 */
2891 void
2892 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2893 {
2894 const struct drm_crtc_helper_funcs *crtc_funcs;
2895 struct drm_crtc *crtc = old_crtc_state->crtc;
2896 struct drm_atomic_state *old_state = old_crtc_state->state;
2897 struct drm_crtc_state *new_crtc_state =
2898 drm_atomic_get_new_crtc_state(old_state, crtc);
2899 struct drm_plane *plane;
2900 unsigned int plane_mask;
2901
2902 plane_mask = old_crtc_state->plane_mask;
2903 plane_mask |= new_crtc_state->plane_mask;
2904
2905 crtc_funcs = crtc->helper_private;
2906 if (crtc_funcs && crtc_funcs->atomic_begin)
2907 crtc_funcs->atomic_begin(crtc, old_state);
2908
2909 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2910 struct drm_plane_state *old_plane_state =
2911 drm_atomic_get_old_plane_state(old_state, plane);
2912 struct drm_plane_state *new_plane_state =
2913 drm_atomic_get_new_plane_state(old_state, plane);
2914 const struct drm_plane_helper_funcs *plane_funcs;
2915 bool disabling;
2916
2917 plane_funcs = plane->helper_private;
2918
2919 if (!old_plane_state || !plane_funcs)
2920 continue;
2921
2922 WARN_ON(new_plane_state->crtc &&
2923 new_plane_state->crtc != crtc);
2924
2925 disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
2926
2927 if (disabling && plane_funcs->atomic_disable) {
2928 plane_funcs->atomic_disable(plane, old_state);
2929 } else if (new_plane_state->crtc || disabling) {
2930 plane_funcs->atomic_update(plane, old_state);
2931
2932 if (!disabling && plane_funcs->atomic_enable) {
2933 if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
2934 plane_funcs->atomic_enable(plane, old_state);
2935 }
2936 }
2937 }
2938
2939 if (crtc_funcs && crtc_funcs->atomic_flush)
2940 crtc_funcs->atomic_flush(crtc, old_state);
2941 }
2942 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
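
/*
 * Example (illustrative sketch): a driver that wants per-CRTC plane updates can
 * call this helper from its custom commit tail instead of one global
 * drm_atomic_helper_commit_planes() step. The foo_ name is hypothetical and the
 * exact ordering depends on the hardware.
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *		struct drm_crtc_state *old_crtc_state;
 *		struct drm_crtc *crtc;
 *		int i;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *
 *		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i)
 *			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 *
 *		drm_atomic_helper_fake_vblank(state);
 *		drm_atomic_helper_commit_hw_done(state);
 *		drm_atomic_helper_wait_for_vblanks(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}
 */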
2943
2944 /**
2945 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2946 * @old_crtc_state: atomic state object with the old CRTC state
2947 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2948 *
2949 * Disables all planes associated with the given CRTC. This can be
2950 * used for instance in the CRTC helper atomic_disable callback to disable
2951 * all planes.
2952 *
2953 * If the @atomic parameter is set, the function calls the CRTC's
2954 * atomic_begin hook before and atomic_flush hook after disabling the
2955 * planes.
2956 *
2957 * It is a bug to call this function without having implemented the
2958 * &drm_plane_helper_funcs.atomic_disable plane hook.
2959 */
2960 void
2961 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2962 bool atomic)
2963 {
2964 struct drm_crtc *crtc = old_crtc_state->crtc;
2965 const struct drm_crtc_helper_funcs *crtc_funcs =
2966 crtc->helper_private;
2967 struct drm_plane *plane;
2968
2969 if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2970 crtc_funcs->atomic_begin(crtc, NULL);
2971
2972 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2973 const struct drm_plane_helper_funcs *plane_funcs =
2974 plane->helper_private;
2975
2976 if (!plane_funcs)
2977 continue;
2978
2979 WARN_ON(!plane_funcs->atomic_disable);
2980 if (plane_funcs->atomic_disable)
2981 plane_funcs->atomic_disable(plane, NULL);
2982 }
2983
2984 if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2985 crtc_funcs->atomic_flush(crtc, NULL);
2986 }
2987 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
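
/*
 * Example (illustrative sketch): using this helper from a CRTC's atomic_disable
 * hook to shut down all of the CRTC's planes before powering it off. The foo_
 * names are hypothetical; foo_crtc_power_off() stands in for the actual
 * hardware programming, and the plane helpers are assumed to implement
 * &drm_plane_helper_funcs.atomic_disable.
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *old_crtc_state =
 *			drm_atomic_get_old_crtc_state(state, crtc);
 *
 *		// false: don't wrap the disables in atomic_begin/atomic_flush
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *		foo_crtc_power_off(crtc);
 *	}
 */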
2988
2989 /**
2990 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2991 * @dev: DRM device
2992 * @state: atomic state object being committed
2993 *
2994 * This function cleans up plane state, specifically framebuffers, from the old
2995 * configuration. Hence the old configuration must be preserved in @state to
2996 * be able to call this function.
2997 *
2998 * This function must not be called on the new state when the atomic update
2999 * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
3000 * drm_atomic_helper_unprepare_planes() in this case.
3001 */
3002 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
3003 struct drm_atomic_state *state)
3004 {
3005 struct drm_plane *plane;
3006 struct drm_plane_state *old_plane_state;
3007 int i;
3008
3009 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3010 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
3011
3012 if (funcs->cleanup_fb)
3013 funcs->cleanup_fb(plane, old_plane_state);
3014 }
3015 }
3016 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
3017
3018 /**
3019 * drm_atomic_helper_swap_state - store atomic state into current sw state
3020 * @state: atomic state
3021 * @stall: stall for preceding commits
3022 *
3023 * This function stores the atomic state into the current state pointers in all
3024 * driver objects. It should be called after all potentially failing steps have
3025 * been done and have succeeded, but before the actual hardware state is committed.
3026 *
3027 * For cleanup and error recovery the current state for all changed objects will
3028 * be swapped into @state.
3029 *
3030 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
3031 *
3032 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
3033 *
3034 * 2. Do any other steps that might fail.
3035 *
3036 * 3. Put the staged state into the current state pointers with this function.
3037 *
3038 * 4. Actually commit the hardware state.
3039 *
3040 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
3041 * contains the old state. Also do any other cleanup required with that state.
3042 *
3043 * @stall must be set when nonblocking commits for this driver directly access
3044 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
3045 * the current atomic helpers this is almost always the case, since the helpers
3046 * don't pass the right state structures to the callbacks.
3047 *
3048 * Returns:
3049 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
3050 * waiting for the previous commits has been interrupted.
3051 */
3052 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
3053 bool stall)
3054 {
3055 int i, ret;
3056 unsigned long flags = 0;
3057 struct drm_connector *connector;
3058 struct drm_connector_state *old_conn_state, *new_conn_state;
3059 struct drm_crtc *crtc;
3060 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3061 struct drm_plane *plane;
3062 struct drm_plane_state *old_plane_state, *new_plane_state;
3063 struct drm_crtc_commit *commit;
3064 struct drm_private_obj *obj;
3065 struct drm_private_state *old_obj_state, *new_obj_state;
3066
3067 if (stall) {
3068 /*
3069 * We have to stall for hw_done here before
3070 * drm_atomic_helper_wait_for_dependencies() because flip
3071 * depth > 1 is not yet supported by all drivers. As long as
3072 * obj->state is directly dereferenced anywhere in the drivers
3073 * atomic_commit_tail function, then it's unsafe to swap state
3074 * before drm_atomic_helper_commit_hw_done() is called.
3075 */
3076
3077 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
3078 commit = old_crtc_state->commit;
3079
3080 if (!commit)
3081 continue;
3082
3083 ret = wait_for_completion_interruptible(&commit->hw_done);
3084 if (ret)
3085 return ret;
3086 }
3087
3088 for_each_old_connector_in_state(state, connector, old_conn_state, i) {
3089 commit = old_conn_state->commit;
3090
3091 if (!commit)
3092 continue;
3093
3094 ret = wait_for_completion_interruptible(&commit->hw_done);
3095 if (ret)
3096 return ret;
3097 }
3098
3099 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3100 commit = old_plane_state->commit;
3101
3102 if (!commit)
3103 continue;
3104
3105 ret = wait_for_completion_interruptible(&commit->hw_done);
3106 if (ret)
3107 return ret;
3108 }
3109 }
3110
3111 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
3112 WARN_ON(connector->state != old_conn_state);
3113
3114 old_conn_state->state = state;
3115 new_conn_state->state = NULL;
3116
3117 state->connectors[i].state = old_conn_state;
3118 connector->state = new_conn_state;
3119 }
3120
3121 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3122 WARN_ON(crtc->state != old_crtc_state);
3123
3124 old_crtc_state->state = state;
3125 new_crtc_state->state = NULL;
3126
3127 state->crtcs[i].state = old_crtc_state;
3128 crtc->state = new_crtc_state;
3129
3130 if (new_crtc_state->commit) {
3131 spin_lock(&crtc->commit_lock);
3132 list_add(&new_crtc_state->commit->commit_entry,
3133 &crtc->commit_list);
3134 spin_unlock(&crtc->commit_lock);
3135
3136 new_crtc_state->commit->event = NULL;
3137 }
3138 }
3139
3140 drm_panic_lock(state->dev, flags);
3141 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3142 WARN_ON(plane->state != old_plane_state);
3143
3144 old_plane_state->state = state;
3145 new_plane_state->state = NULL;
3146
3147 state->planes[i].state = old_plane_state;
3148 plane->state = new_plane_state;
3149 }
3150 drm_panic_unlock(state->dev, flags);
3151
3152 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
3153 WARN_ON(obj->state != old_obj_state);
3154
3155 old_obj_state->state = state;
3156 new_obj_state->state = NULL;
3157
3158 state->private_objs[i].state = old_obj_state;
3159 obj->state = new_obj_state;
3160 }
3161
3162 return 0;
3163 }
3164 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
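
/*
 * Example (illustrative sketch): the five-step sequence documented above, as it
 * could appear in a simplified blocking commit path. Here dev and state are
 * assumed to be the &drm_device and the &drm_atomic_state being committed, and
 * anything beyond the helpers shown is omitted.
 *
 *	int ret;
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);	// step 1
 *	if (ret)
 *		return ret;
 *
 *	// step 2: any other step that might fail goes here
 *
 *	ret = drm_atomic_helper_swap_state(state, true);	// step 3
 *	if (ret) {
 *		drm_atomic_helper_unprepare_planes(dev, state);
 *		return ret;
 *	}
 *
 *	drm_atomic_helper_commit_tail(state);			// step 4
 *
 *	// step 5 is handled inside drm_atomic_helper_commit_tail(), which calls
 *	// drm_atomic_helper_cleanup_planes() with @state now holding the old state.
 */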

/**
 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
 * @plane: plane object to update
 * @crtc: owning CRTC of the plane
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of primary plane on @crtc
 * @crtc_y: y offset of primary plane on @crtc
 * @crtc_w: width of primary plane rectangle on @crtc
 * @crtc_h: height of primary plane rectangle on @crtc
 * @src_x: x offset of @fb for panning
 * @src_y: y offset of @fb for panning
 * @src_w: width of source rectangle in @fb
 * @src_h: height of source rectangle in @fb
 * @ctx: lock acquire context
 *
 * Provides a default plane update handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
				   struct drm_crtc *crtc,
				   struct drm_framebuffer *fb,
				   int crtc_x, int crtc_y,
				   unsigned int crtc_w, unsigned int crtc_h,
				   uint32_t src_x, uint32_t src_y,
				   uint32_t src_w, uint32_t src_h,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		goto fail;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;
	plane_state->crtc_w = crtc_w;
	plane_state->crtc_h = crtc_h;
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;
	plane_state->src_w = src_w;
	plane_state->src_h = src_h;

	if (plane == crtc->cursor)
		state->legacy_cursor_update = true;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_update_plane);

/**
 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
 * @plane: plane to disable
 * @ctx: lock acquire context
 *
 * Provides a default plane disable handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	if (plane_state->crtc && plane_state->crtc->cursor == plane)
		plane_state->state->legacy_cursor_update = true;

	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
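
/*
 * Usage sketch (the driver name "foo" is made up; the exact callback set
 * varies per driver): the two helpers above are meant to be plugged straight
 * into &drm_plane_funcs, typically together with the default plane state
 * handling from drm_atomic_state_helper.c:
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane		= drm_atomic_helper_update_plane,
 *		.disable_plane		= drm_atomic_helper_disable_plane,
 *		.destroy		= drm_plane_cleanup,
 *		.reset			= drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 *	};
 */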

/**
 * drm_atomic_helper_set_config - set a new config from userspace
 * @set: mode set configuration
 * @ctx: lock acquisition context
 *
 * Provides a default CRTC set_config handler using the atomic driver interface.
 *
 * NOTE: For backwards compatibility with old userspace this automatically
 * resets the "link-status" property to GOOD, to force any link
 * re-training. The SETCRTC ioctl does not define whether an update needs
 * a full modeset or just a plane update, hence we're allowed to do
 * that. See also drm_connector_set_link_status_property().
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_set_config(struct drm_mode_set *set,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc *crtc = set->crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	ret = __drm_atomic_helper_set_config(set, state);
	if (ret != 0)
		goto fail;

	ret = handle_conflicting_encoders(state, true);
	if (ret)
		goto fail;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);
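
/*
 * Usage sketch (the driver name "foo" is made up; which callbacks a driver
 * fills in depends on its feature set): drm_atomic_helper_set_config() and
 * the legacy page flip helper further down are wired into &drm_crtc_funcs
 * like this:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config		= drm_atomic_helper_set_config,
 *		.page_flip		= drm_atomic_helper_page_flip,
 *		.destroy		= drm_crtc_cleanup,
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *	};
 */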

/**
 * drm_atomic_helper_disable_all - disable all currently active outputs
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Loops through all connectors, finding those that aren't turned off and then
 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
 * that they are connected to.
 *
 * This is used for example in suspend/resume to disable all currently active
 * outputs when suspending. If you just want to shut down everything at e.g.
 * driver unload, look at drm_atomic_helper_shutdown().
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
 * drm_atomic_helper_shutdown().
 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	ret = drm_atomic_commit(state);
free:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
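
/*
 * Usage sketch (foo_blank_all() is a made-up driver function; this is
 * essentially what drm_atomic_helper_shutdown() below does): the -EDEADLK
 * handling mentioned above is easiest to get right by wrapping the call in
 * the DRM_MODESET_LOCK_ALL_BEGIN()/END() macros, which retry on contention:
 *
 *	static int foo_blank_all(struct drm_device *dev)
 *	{
 *		struct drm_modeset_acquire_ctx ctx;
 *		int ret;
 *
 *		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 *		ret = drm_atomic_helper_disable_all(dev, &ctx);
 *		DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *
 *		return ret;
 *	}
 */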

/**
 * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
 * @crtc: DRM CRTC
 * @ctx: lock acquisition context
 *
 * Reset the active outputs by indicating that connectors have changed.
 * This implies a reset of all active components available between the CRTC and
 * connectors.
 *
 * NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
 * For drivers which optimize out unnecessary modesets this will result in
 * a no-op commit, achieving nothing.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	crtc_state->connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
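
/*
 * Usage sketch (purely illustrative, foo_retrain_link() is made up): a driver
 * that detects a problem on an active connector can force the pipeline
 * feeding it through a full enable sequence again, under the usual
 * lock-retry pattern:
 *
 *	static int foo_retrain_link(struct drm_connector *connector)
 *	{
 *		struct drm_modeset_acquire_ctx ctx;
 *		struct drm_crtc *crtc;
 *		int ret;
 *
 *		DRM_MODESET_LOCK_ALL_BEGIN(connector->dev, ctx, 0, ret);
 *		crtc = connector->state->crtc;
 *		if (crtc)
 *			ret = drm_atomic_helper_reset_crtc(crtc, &ctx);
 *		DRM_MODESET_LOCK_ALL_END(connector->dev, ctx, ret);
 *
 *		return ret;
 *	}
 */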

/**
 * drm_atomic_helper_shutdown - shut down all CRTCs
 * @dev: DRM device
 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
 * suspend should instead be handled with drm_atomic_helper_suspend(), since
 * that also takes a snapshot of the modeset state to be restored on resume.
 *
 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
 * and it is the atomic version of drm_helper_force_disable_all().
 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (dev == NULL)
		return;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret)
		drm_err(dev,
			"Disabling all crtc's during unload failed with %i\n",
			ret);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
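
/*
 * Usage sketch (a hypothetical platform driver named "foo"; the pattern is
 * the same for PCI drivers): drivers typically call this from both their
 * remove and shutdown callbacks, after unregistering the device:
 *
 *	static void foo_platform_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *	}
 *
 *	static void foo_platform_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 * The NULL check above exists precisely so that shutdown callbacks can pass
 * in drvdata that may not have been set yet.
 */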

/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by suspend/
 * resume support code to save the state prior to suspend such that it can
 * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;
	state->duplicated = true;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to that snapshot of the original state to the caller.
 * Drivers can pass this pointer to the drm_atomic_helper_resume() helper upon
 * resume to restore the output configuration that was active at the time the
 * system entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
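
/*
 * Usage sketch (the driver struct "foo_device" and its members are made up;
 * fbdev suspend is omitted for brevity): a typical system-suspend hook stops
 * anything else touching the hardware first and then stores the returned
 * snapshot for resume time:
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		struct drm_atomic_state *state;
 *
 *		drm_kms_helper_poll_disable(foo->drm);
 *
 *		state = drm_atomic_helper_suspend(foo->drm);
 *		if (IS_ERR(state)) {
 *			drm_kms_helper_poll_enable(foo->drm);
 *			return PTR_ERR(state);
 *		}
 *
 *		foo->suspend_state = state;
 *		return 0;
 *	}
 *
 * Many drivers can use drm_mode_config_helper_suspend() instead, which wraps
 * this sequence (including suspending fbdev emulation) and stashes the
 * snapshot in &drm_mode_config.suspend_state.
 */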

/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: pointer to acquire_ctx to use for commit.
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i, ret;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i)
		state->planes[i].old_state = plane->state;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);

	state->acquire_ctx = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);

/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
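
/*
 * Usage sketch (counterpart to the foo_pm_suspend() sketch above, same
 * hypothetical driver): the snapshot taken at suspend time is handed back
 * here and is consumed by drm_atomic_helper_resume(), so the stored pointer
 * must not be reused afterwards:
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = drm_atomic_helper_resume(foo->drm, foo->suspend_state);
 *		foo->suspend_state = NULL;
 *
 *		drm_kms_helper_poll_enable(foo->drm);
 *
 *		return ret;
 *	}
 *
 * The matching convenience wrapper for drm_mode_config_helper_suspend() is
 * drm_mode_config_helper_resume().
 */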

static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}

/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);

/**
 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period in which the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
 * specifying the target vblank period in which the flip should take effect.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);

/**
 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
 *						  the input end of a bridge
 * @bridge: bridge control structure
 * @bridge_state: new bridge state
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 * @output_fmt: tested output bus format
 * @num_input_fmts: will contain the size of the returned array
 *
 * This helper is a pluggable implementation of the
 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
 * modify the bus configuration between their input and their output. It
 * returns an array of input formats with a single element set to @output_fmt.
 *
 * RETURNS:
 * a valid format array of size @num_input_fmts, or NULL if the allocation
 * failed
 */
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
					   struct drm_bridge_state *bridge_state,
					   struct drm_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state,
					   u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts[0] = output_fmt;
	return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
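
/*
 * Usage sketch (a hypothetical transparent bridge driver named "foo" that
 * doesn't convert the bus format; foo_bridge_attach() is made up): this
 * helper slots into &drm_bridge_funcs next to the default bridge state
 * handling from drm_atomic_state_helper.c:
 *
 *	static const struct drm_bridge_funcs foo_bridge_funcs = {
 *		.attach			   = foo_bridge_attach,
 *		.atomic_reset		   = drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state	   = drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state	   = drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 */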