1 /*
2  * Copyright (c) 2014 Samsung Electronics Co., Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/err.h>
26 #include <linux/export.h>
27 #include <linux/media-bus-format.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 
31 #include <drm/drm_atomic_state_helper.h>
32 #include <drm/drm_bridge.h>
33 #include <drm/drm_debugfs.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drm_encoder.h>
36 #include <drm/drm_file.h>
37 #include <drm/drm_of.h>
38 #include <drm/drm_print.h>
39 
40 #include "drm_crtc_internal.h"
41 
42 /**
43  * DOC: overview
44  *
45  * &struct drm_bridge represents a device that hangs on to an encoder. These are
46  * handy when a regular &drm_encoder entity isn't enough to represent the entire
47  * encoder chain.
48  *
49  * A bridge is always attached to a single &drm_encoder at a time, but can be
50  * either connected to it directly, or through a chain of bridges::
51  *
52  *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
53  *
54  * Here, the output of the encoder feeds to bridge A, and that further feeds to
55  * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
56  * Chaining multiple bridges to the output of a bridge, or the same bridge to
57  * the output of different bridges, is not supported.
58  *
59  * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
60  * CRTCs, encoders or connectors and hence are not visible to userspace. They
61  * just provide additional hooks to get the desired output at the end of the
62  * encoder chain.
63  */
64 
65 /**
66  * DOC:	display driver integration
67  *
68  * Display drivers are responsible for linking encoders with the first bridge
69  * in the chains. This is done by acquiring the appropriate bridge with
70  * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
71  * encoder with a call to drm_bridge_attach().
72  *
73  * Bridges are responsible for linking themselves with the next bridge in the
74  * chain, if any. This is done the same way as for encoders, with the call to
75  * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
76  *
77  * Once these links are created, the bridges can participate along with encoder
78  * functions to perform mode validation and fixup (through
79  * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
80  * setting (through drm_bridge_chain_mode_set()), enable (through
81  * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
82  * and disable (through drm_atomic_bridge_chain_disable() and
83  * drm_atomic_bridge_chain_post_disable()). Those functions call the
84  * corresponding operations provided in &drm_bridge_funcs in sequence for all
85  * bridges in the chain.
86  *
87  * For display drivers that use the atomic helpers
88  * drm_atomic_helper_check_modeset(),
89  * drm_atomic_helper_commit_modeset_enables() and
90  * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
91  * commit check and commit tail handlers, or through the higher-level
92  * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
93  * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
94  * requires no intervention from the driver. For other drivers, the relevant
95  * DRM bridge chain functions shall be called manually.
96  *
97  * Bridges also participate in implementing the &drm_connector at the end of
98  * the bridge chain. Display drivers may use the drm_bridge_connector_init()
99  * helper to create the &drm_connector, or implement it manually on top of the
100  * connector-related operations exposed by the bridge (see the overview
101  * documentation of bridge operations for more details).
102  */
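
/*
 * A minimal, hypothetical sketch of the above (foo_* names and the OF
 * port/endpoint indexes are made up): a display driver acquires the first
 * bridge, attaches it to its encoder without creating a connector, and
 * builds the connector with the drm_bridge_connector helper.
 *
 *	static int foo_encoder_init(struct foo_device *foo)
 *	{
 *		struct drm_connector *connector;
 *		struct drm_bridge *bridge;
 *		int ret;
 *
 *		bridge = devm_drm_of_get_bridge(foo->dev, foo->dev->of_node, 0, 0);
 *		if (IS_ERR(bridge))
 *			return PTR_ERR(bridge);
 *
 *		ret = drm_bridge_attach(&foo->encoder, bridge, NULL,
 *					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *		if (ret)
 *			return ret;
 *
 *		connector = drm_bridge_connector_init(foo->drm, &foo->encoder);
 *		if (IS_ERR(connector))
 *			return PTR_ERR(connector);
 *
 *		return drm_connector_attach_encoder(connector, &foo->encoder);
 *	}
 */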
103 
104 /**
105  * DOC: special care dsi
106  *
107  * The interaction between the bridges and other frameworks involved in
108  * the probing of the upstream driver and the bridge driver can be
109  * challenging. Indeed, there are multiple cases that need to be
110  * considered:
111  *
112  * - The upstream driver doesn't use the component framework and isn't a
113  *   MIPI-DSI host. In this case, the bridge driver will probe at some
114  *   point and the upstream driver should defer its own probe by returning
115  *   -EPROBE_DEFER as long as the bridge driver hasn't probed.
116  *
117  * - The upstream driver doesn't use the component framework, but is a
118  *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
119  *   commands. In this case, the bridge device is a child of the
120  *   display device and, when it probes, it is assured that the display
121  *   device (and MIPI-DSI host) is present. The upstream driver will be
122  *   assured that the bridge driver is connected between the
123  *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
124  *   Therefore, it must run mipi_dsi_host_register() in its probe
125  *   function, and then run drm_bridge_attach() in its
126  *   &mipi_dsi_host_ops.attach hook.
127  *
128  * - The upstream driver uses the component framework and is a MIPI-DSI
129  *   host. The bridge device is controlled through MIPI-DCS commands.
130  *   This is the same situation as above, and the upstream driver can run
131  *   mipi_dsi_host_register() in either its probe or bind hooks.
132  *
133  * - The upstream driver uses the component framework and is a MIPI-DSI
134  *   host. The bridge device uses a separate bus (such as I2C) to be
135  *   controlled. In this case, there's no correlation between the probe
136  *   of the bridge and upstream drivers, so care must be taken to avoid
137  *   an endless EPROBE_DEFER loop, with each driver waiting for the
138  *   other to probe.
139  *
140  * The ideal pattern to cover the last item (and all the others in the
141  * MIPI-DSI host driver case) is to split the operations like this:
142  *
143  * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
144  *   probe hook. It will make sure that the MIPI-DSI host sticks around,
145  *   and that the driver's bind can be called.
146  *
147  * - In its probe hook, the bridge driver must try to find its MIPI-DSI
148  *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
149  *   to its host. The bridge driver is now functional.
150  *
151  * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
152  *   now add its component. Its bind hook will now be called and since
153  *   the bridge driver is attached and registered, we can now look for
154  *   and attach it.
155  *
156  * At this point, we're now certain that both the upstream driver and
157  * the bridge driver are functional and we can't have a deadlock-like
158  * situation when probing.
159  */
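
/*
 * Hypothetical sketch of the ordering described above (foo_* is the MIPI-DSI
 * host / upstream driver, bar_* the bridge driver; only the calls relevant to
 * probe ordering are shown):
 *
 *	static int foo_host_probe(struct platform_device *pdev)
 *	{
 *		// Register the DSI host early so the bridge driver can probe.
 *		return mipi_dsi_host_register(&foo->dsi_host);
 *	}
 *
 *	static int foo_host_attach(struct mipi_dsi_host *host,
 *				   struct mipi_dsi_device *dsi)
 *	{
 *		// The bridge has attached to the host: it is safe to add the
 *		// component; the bind hook then looks up and attaches the bridge.
 *		return component_add(host->dev, &foo_component_ops);
 *	}
 *
 *	static int bar_bridge_probe(struct i2c_client *client)
 *	{
 *		// Find the host, register and attach as a MIPI-DSI device,
 *		// then publish the bridge: the bridge driver is now functional.
 *		host = of_find_mipi_dsi_host_by_node(host_node);
 *		if (!host)
 *			return -EPROBE_DEFER;
 *
 *		dsi = devm_mipi_dsi_device_register_full(&client->dev, host, &info);
 *		if (IS_ERR(dsi))
 *			return PTR_ERR(dsi);
 *
 *		ret = devm_mipi_dsi_attach(&client->dev, dsi);
 *		if (ret)
 *			return ret;
 *
 *		drm_bridge_add(&bar->bridge);
 *		return 0;
 *	}
 */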
160 
161 /**
162  * DOC: dsi bridge operations
163  *
164  * DSI host interfaces are expected to be implemented as bridges rather than
165  * encoders, however there are a few aspects of their operation that need to
166  * be defined in order to provide a consistent interface.
167  *
168  * A DSI host should keep the PHY powered down until the pre_enable operation is
169  * called. All lanes are in an undefined idle state up to this point, and it
170  * must not be assumed that they are in LP-11.
171  * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
172  * clock lane to either LP-11 or HS depending on the mode_flag
173  * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
174  *
175  * Ordinarily the downstream (DSI peripheral) bridge's pre_enable will have been
176  * called before the DSI host's. If the DSI peripheral requires LP-11 and/or
177  * the clock lane to be in HS mode prior to pre_enable, then it can set the
178  * &pre_enable_prev_first flag to request the pre_enable (and
179  * post_disable) order to be altered to enable the DSI host first.
180  *
181  * Either the CRTC being enabled, or the DSI host enable operation should switch
182  * the host to actively transmitting video on the data lanes.
183  *
184  * The reverse also applies. The DSI host disable operation or stopping the CRTC
185  * should stop transmitting video, and the data lanes should return to the LP-11
186  * state. The DSI host &post_disable operation should disable the PHY.
187  * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
188  * bridge &post_disable will be called before the DSI host's post_disable.
189  *
190  * Whilst it is valid to call &host_transfer prior to pre_enable or after
191  * post_disable, the exact state of the lanes is undefined at this point. The
192  * DSI host should initialise the interface, transmit the data, and then disable
193  * the interface again.
194  *
195  * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
196  * implemented, it therefore needs to be handled entirely within the DSI Host
197  * driver.
198  */
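
/*
 * Sketch of a DSI peripheral bridge that needs the host brought up first
 * (data lanes in LP-11) before its own pre_enable runs; bar_* names are
 * hypothetical. Setting the flag also makes the host's post_disable run
 * after the peripheral's, reversing the order symmetrically.
 *
 *	static int bar_bridge_probe(struct mipi_dsi_device *dsi)
 *	{
 *		...
 *		bar->bridge.funcs = &bar_bridge_funcs;
 *		bar->bridge.of_node = dsi->dev.of_node;
 *		bar->bridge.pre_enable_prev_first = true;
 *		drm_bridge_add(&bar->bridge);
 *		return 0;
 *	}
 */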
199 
200 static DEFINE_MUTEX(bridge_lock);
201 static LIST_HEAD(bridge_list);
202 
203 static void __drm_bridge_free(struct kref *kref)
204 {
205 	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
206 
207 	if (bridge->funcs->destroy)
208 		bridge->funcs->destroy(bridge);
209 	kfree(bridge->container);
210 }
211 
212 /**
213  * drm_bridge_get - Acquire a bridge reference
214  * @bridge: DRM bridge
215  *
216  * This function increments the bridge's refcount.
217  *
218  * Returns:
219  * Pointer to @bridge.
220  */
221 struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
222 {
223 	if (bridge)
224 		kref_get(&bridge->refcount);
225 
226 	return bridge;
227 }
228 EXPORT_SYMBOL(drm_bridge_get);
229 
230 /**
231  * drm_bridge_put - Release a bridge reference
232  * @bridge: DRM bridge
233  *
234  * This function decrements the bridge's reference count and frees the
235  * object if the reference count drops to zero.
236  */
237 void drm_bridge_put(struct drm_bridge *bridge)
238 {
239 	if (bridge)
240 		kref_put(&bridge->refcount, __drm_bridge_free);
241 }
242 EXPORT_SYMBOL(drm_bridge_put);
243 
244 /**
245  * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
246  *
247  * @data: pointer to @struct drm_bridge, cast to a void pointer
248  *
249  * Wrapper of drm_bridge_put() to be used when a function taking a void
250  * pointer is needed, for example as a devm action.
251  */
252 static void drm_bridge_put_void(void *data)
253 {
254 	struct drm_bridge *bridge = (struct drm_bridge *)data;
255 
256 	drm_bridge_put(bridge);
257 }
258 
259 void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
260 			      const struct drm_bridge_funcs *funcs)
261 {
262 	void *container;
263 	struct drm_bridge *bridge;
264 	int err;
265 
266 	if (!funcs) {
267 		dev_warn(dev, "Missing funcs pointer\n");
268 		return ERR_PTR(-EINVAL);
269 	}
270 
271 	container = kzalloc(size, GFP_KERNEL);
272 	if (!container)
273 		return ERR_PTR(-ENOMEM);
274 
275 	bridge = container + offset;
276 	bridge->container = container;
277 	bridge->funcs = funcs;
278 	kref_init(&bridge->refcount);
279 
280 	err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
281 	if (err)
282 		return ERR_PTR(err);
283 
284 	return container;
285 }
286 EXPORT_SYMBOL(__devm_drm_bridge_alloc);
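
/*
 * Typical usage goes through the devm_drm_bridge_alloc() macro wrapping this
 * function: the driver embeds the &struct drm_bridge in its private structure
 * and the whole allocation is freed once the last reference is put. A
 * hypothetical sketch (bar_* names are made up):
 *
 *	struct bar_bridge {
 *		struct drm_bridge bridge;
 *		// driver-private fields
 *	};
 *
 *	bar = devm_drm_bridge_alloc(dev, struct bar_bridge, bridge,
 *				    &bar_bridge_funcs);
 *	if (IS_ERR(bar))
 *		return PTR_ERR(bar);
 *
 *	drm_bridge_add(&bar->bridge);
 */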
287 
288 /**
289  * drm_bridge_add - add the given bridge to the global bridge list
290  *
291  * @bridge: bridge control structure
292  *
293  * The bridge to be added must have been allocated by
294  * devm_drm_bridge_alloc().
295  */
296 void drm_bridge_add(struct drm_bridge *bridge)
297 {
298 	if (!bridge->container)
299 		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
300 
301 	drm_bridge_get(bridge);
302 
303 	mutex_init(&bridge->hpd_mutex);
304 
305 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
306 		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
307 					       BIT(HDMI_COLORSPACE_YUV420));
308 
309 	mutex_lock(&bridge_lock);
310 	list_add_tail(&bridge->list, &bridge_list);
311 	mutex_unlock(&bridge_lock);
312 }
313 EXPORT_SYMBOL(drm_bridge_add);
314 
315 static void drm_bridge_remove_void(void *bridge)
316 {
317 	drm_bridge_remove(bridge);
318 }
319 
320 /**
321  * devm_drm_bridge_add - devm managed version of drm_bridge_add()
322  *
323  * @dev: device to tie the bridge lifetime to
324  * @bridge: bridge control structure
325  *
326  * This is the managed version of drm_bridge_add() which automatically
327  * calls drm_bridge_remove() when @dev is unbound.
328  *
329  * Return: 0 if no error or negative error code.
330  */
331 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
332 {
333 	drm_bridge_add(bridge);
334 	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
335 }
336 EXPORT_SYMBOL(devm_drm_bridge_add);
337 
338 /**
339  * drm_bridge_remove - remove the given bridge from the global bridge list
340  *
341  * @bridge: bridge control structure
342  */
343 void drm_bridge_remove(struct drm_bridge *bridge)
344 {
345 	mutex_lock(&bridge_lock);
346 	list_del_init(&bridge->list);
347 	mutex_unlock(&bridge_lock);
348 
349 	mutex_destroy(&bridge->hpd_mutex);
350 
351 	drm_bridge_put(bridge);
352 }
353 EXPORT_SYMBOL(drm_bridge_remove);
354 
355 static struct drm_private_state *
356 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
357 {
358 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
359 	struct drm_bridge_state *state;
360 
361 	state = bridge->funcs->atomic_duplicate_state(bridge);
362 	return state ? &state->base : NULL;
363 }
364 
365 static void
366 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
367 				     struct drm_private_state *s)
368 {
369 	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
370 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
371 
372 	bridge->funcs->atomic_destroy_state(bridge, state);
373 }
374 
375 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
376 	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
377 	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
378 };
379 
380 static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
381 {
382 	return bridge->funcs->atomic_reset != NULL;
383 }
384 
385 /**
386  * drm_bridge_attach - attach the bridge to an encoder's chain
387  *
388  * @encoder: DRM encoder
389  * @bridge: bridge to attach
390  * @previous: previous bridge in the chain (optional)
391  * @flags: DRM_BRIDGE_ATTACH_* flags
392  *
393  * Called by a kms driver to link the bridge to an encoder's chain. The previous
394  * argument specifies the previous bridge in the chain. If NULL, the bridge is
395  * linked directly at the encoder's output. Otherwise it is linked at the
396  * previous bridge's output.
397  *
398  * If non-NULL the previous bridge must be already attached by a call to this
399  * function.
400  *
401  * Note that bridges attached to encoders are auto-detached during encoder
402  * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
403  * *not* be balanced with a drm_bridge_detach() in driver code.
404  *
405  * RETURNS:
406  * Zero on success, error code on failure
407  */
408 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
409 		      struct drm_bridge *previous,
410 		      enum drm_bridge_attach_flags flags)
411 {
412 	int ret;
413 
414 	if (!encoder || !bridge)
415 		return -EINVAL;
416 
417 	drm_bridge_get(bridge);
418 
419 	if (previous && (!previous->dev || previous->encoder != encoder)) {
420 		ret = -EINVAL;
421 		goto err_put_bridge;
422 	}
423 
424 	if (bridge->dev) {
425 		ret = -EBUSY;
426 		goto err_put_bridge;
427 	}
428 
429 	bridge->dev = encoder->dev;
430 	bridge->encoder = encoder;
431 
432 	if (previous)
433 		list_add(&bridge->chain_node, &previous->chain_node);
434 	else
435 		list_add(&bridge->chain_node, &encoder->bridge_chain);
436 
437 	if (bridge->funcs->attach) {
438 		ret = bridge->funcs->attach(bridge, encoder, flags);
439 		if (ret < 0)
440 			goto err_reset_bridge;
441 	}
442 
443 	if (drm_bridge_is_atomic(bridge)) {
444 		struct drm_bridge_state *state;
445 
446 		state = bridge->funcs->atomic_reset(bridge);
447 		if (IS_ERR(state)) {
448 			ret = PTR_ERR(state);
449 			goto err_detach_bridge;
450 		}
451 
452 		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
453 					    &state->base,
454 					    &drm_bridge_priv_state_funcs);
455 	}
456 
457 	return 0;
458 
459 err_detach_bridge:
460 	if (bridge->funcs->detach)
461 		bridge->funcs->detach(bridge);
462 
463 err_reset_bridge:
464 	bridge->dev = NULL;
465 	bridge->encoder = NULL;
466 	list_del(&bridge->chain_node);
467 
468 	if (ret != -EPROBE_DEFER)
469 		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
470 			  bridge->of_node, encoder->name, ret);
471 	else
472 		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
473 			      "failed to attach bridge %pOF to encoder %s\n",
474 			      bridge->of_node, encoder->name);
475 
476 err_put_bridge:
477 	drm_bridge_put(bridge);
478 	return ret;
479 }
480 EXPORT_SYMBOL(drm_bridge_attach);
481 
482 void drm_bridge_detach(struct drm_bridge *bridge)
483 {
484 	if (WARN_ON(!bridge))
485 		return;
486 
487 	if (WARN_ON(!bridge->dev))
488 		return;
489 
490 	if (drm_bridge_is_atomic(bridge))
491 		drm_atomic_private_obj_fini(&bridge->base);
492 
493 	if (bridge->funcs->detach)
494 		bridge->funcs->detach(bridge);
495 
496 	list_del(&bridge->chain_node);
497 	bridge->dev = NULL;
498 	drm_bridge_put(bridge);
499 }
500 
501 /**
502  * DOC: bridge operations
503  *
504  * Bridge drivers expose operations through the &drm_bridge_funcs structure.
505  * The DRM internals (atomic and CRTC helpers) use the helpers defined in
506  * drm_bridge.c to call bridge operations. Those operations are divided in
507  * three big categories to support different parts of the bridge usage.
508  *
509  * - The encoder-related operations support control of the bridges in the
510  *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
511  *   operations. They are used by the legacy CRTC and the atomic modeset
512  *   helpers to perform mode validation, fixup and setting, and enable and
513  *   disable the bridge automatically.
514  *
515  *   The enable and disable operations are split in
516  *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
517  *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
518  *   finer-grained control.
519  *
520  *   Bridge drivers may implement the legacy version of those operations, or
521  *   the atomic version (prefixed with atomic\_), in which case they shall also
522  *   implement the atomic state bookkeeping operations
523  *   (&drm_bridge_funcs.atomic_duplicate_state,
524  *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.reset).
525  *   Mixing atomic and non-atomic versions of the operations is not supported.
526  *
527  * - The bus format negotiation operations
528  *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
529  *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
530  *   negotiate the formats transmitted between bridges in the chain when
531  *   multiple formats are supported. Negotiation for formats is performed
532  *   transparently for display drivers by the atomic modeset helpers. Only
533  *   atomic versions of those operations exist, bridge drivers that need to
534  *   implement them shall thus also implement the atomic version of the
535  *   encoder-related operations. This feature is not supported by the legacy
536  *   CRTC helpers.
537  *
538  * - The connector-related operations support implementing a &drm_connector
539  *   based on a chain of bridges. DRM bridges traditionally create a
540  *   &drm_connector for bridges meant to be used at the end of the chain. This
541  *   puts additional burden on bridge drivers, especially for bridges that may
542  *   be used in the middle of a chain or at the end of it. Furthermore, it
543  *   requires all operations of the &drm_connector to be handled by a single
544  *   bridge, which doesn't always match the hardware architecture.
545  *
546  *   To simplify bridge drivers and make the connector implementation more
547  *   flexible, a new model allows bridges to unconditionally skip creation of
548  *   &drm_connector and instead expose &drm_bridge_funcs operations to support
549  *   an externally-implemented &drm_connector. Those operations are
550  *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
551  *   &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
552  *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
553  *   implemented, display drivers shall create a &drm_connector instance for
554  *   each chain of bridges, and implement those connector instances based on
555  *   the bridge connector operations.
556  *
557  *   Bridge drivers shall implement the connector-related operations for all
558  *   the features that the bridge hardware supports. For instance, if a bridge
559  *   supports reading EDID, the &drm_bridge_funcs.get_edid shall be
560  *   implemented. This however doesn't mean that the DDC lines are wired to the
561  *   bridge on a particular platform, as they could also be connected to an I2C
562  *   controller of the SoC. Support for the connector-related operations on the
563  *   running platform is reported through the &drm_bridge.ops flags. Bridge
564  *   drivers shall detect which operations they can support on the platform
565  *   (usually this information is provided by ACPI or DT), and set the
566  *   &drm_bridge.ops flags for all supported operations. A flag shall only be
567  *   set if the corresponding &drm_bridge_funcs operation is implemented, but
568  *   an implemented operation doesn't necessarily imply that the corresponding
569  *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
570  *   decide which bridge to delegate a connector operation to. This mechanism
571  *   allows providing a single static const &drm_bridge_funcs instance in
572  *   bridge drivers, improving security by storing function pointers in
573  *   read-only memory.
574  *
575  *   In order to ease transition, bridge drivers may support both the old and
576  *   new models by making connector creation optional and implementing the
577  *   connector-related bridge operations. Connector creation is then controlled
578  *   by the flags argument to the drm_bridge_attach() function. Display drivers
579  *   that support the new model and create connectors themselves shall set the
580  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
581  *   connector creation. For intermediate bridges in the chain, the flag shall
582  *   be passed to the drm_bridge_attach() call for the downstream bridge.
583  *   Bridge drivers that implement the new model only shall return an error
584  *   from their &drm_bridge_funcs.attach handler when the
585  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
586  *   should use the new model, and convert the bridge drivers they use if
587  *   needed, in order to gradually transition to the new model.
588  */
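
/*
 * Hypothetical sketch of a bridge driver that only supports the new model:
 * its attach hook rejects the legacy path and forwards the flag when
 * attaching the next bridge, and the probe code advertises the connector
 * operations wired up on this particular board (bar_* names are made up).
 *
 *	static int bar_bridge_attach(struct drm_bridge *bridge,
 *				     struct drm_encoder *encoder,
 *				     enum drm_bridge_attach_flags flags)
 *	{
 *		struct bar_bridge *bar = to_bar_bridge(bridge);
 *
 *		if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 *			return -EINVAL;
 *
 *		return drm_bridge_attach(encoder, bar->next_bridge, bridge, flags);
 *	}
 *
 *	// In probe, after checking what the firmware describes:
 *	bar->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
 *			  DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES;
 */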
589 
590 /**
591  * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
592  *				 encoder chain.
593  * @bridge: bridge control structure
594  * @info: display info against which the mode shall be validated
595  * @mode: desired mode to be validated
596  *
597  * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
598  * chain, starting from the first bridge to the last. If at least one bridge
599  * does not accept the mode the function returns the error code.
600  *
601  * Note: the bridge passed should be the one closest to the encoder.
602  *
603  * RETURNS:
604  * MODE_OK on success, or the relevant drm_mode_status error code on failure
605  */
606 enum drm_mode_status
607 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
608 			    const struct drm_display_info *info,
609 			    const struct drm_display_mode *mode)
610 {
611 	struct drm_encoder *encoder;
612 
613 	if (!bridge)
614 		return MODE_OK;
615 
616 	encoder = bridge->encoder;
617 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
618 		enum drm_mode_status ret;
619 
620 		if (!bridge->funcs->mode_valid)
621 			continue;
622 
623 		ret = bridge->funcs->mode_valid(bridge, info, mode);
624 		if (ret != MODE_OK)
625 			return ret;
626 	}
627 
628 	return MODE_OK;
629 }
630 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
631 
632 /**
633  * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
634  *			       encoder chain
635  * @bridge: bridge control structure
636  * @mode: desired mode to be set for the encoder chain
637  * @adjusted_mode: updated mode that works for this encoder chain
638  *
639  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
640  * encoder chain, starting from the first bridge to the last.
641  *
642  * Note: the bridge passed should be the one closest to the encoder
643  */
644 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
645 			       const struct drm_display_mode *mode,
646 			       const struct drm_display_mode *adjusted_mode)
647 {
648 	struct drm_encoder *encoder;
649 
650 	if (!bridge)
651 		return;
652 
653 	encoder = bridge->encoder;
654 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
655 		if (bridge->funcs->mode_set)
656 			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
657 	}
658 }
659 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
660 
661 /**
662  * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
663  * @bridge: bridge control structure
664  * @state: atomic state being committed
665  *
666  * Calls &drm_bridge_funcs.atomic_disable (falls back on
667  * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
668  * starting from the last bridge to the first. These are called before calling
669  * &drm_encoder_helper_funcs.atomic_disable
670  *
671  * Note: the bridge passed should be the one closest to the encoder
672  */
673 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
674 				     struct drm_atomic_state *state)
675 {
676 	struct drm_encoder *encoder;
677 	struct drm_bridge *iter;
678 
679 	if (!bridge)
680 		return;
681 
682 	encoder = bridge->encoder;
683 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
684 		if (iter->funcs->atomic_disable) {
685 			iter->funcs->atomic_disable(iter, state);
686 		} else if (iter->funcs->disable) {
687 			iter->funcs->disable(iter);
688 		}
689 
690 		if (iter == bridge)
691 			break;
692 	}
693 }
694 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
695 
696 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
697 						struct drm_atomic_state *state)
698 {
699 	if (state && bridge->funcs->atomic_post_disable)
700 		bridge->funcs->atomic_post_disable(bridge, state);
701 	else if (bridge->funcs->post_disable)
702 		bridge->funcs->post_disable(bridge);
703 }
704 
705 /**
706  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
707  *					  in the encoder chain
708  * @bridge: bridge control structure
709  * @state: atomic state being committed
710  *
711  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
712  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
713  * starting from the first bridge to the last. These are called after completing
714  * &drm_encoder_helper_funcs.atomic_disable
715  *
716  * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
717  * bridge will be called before the previous one to reverse the @pre_enable
718  * calling direction.
719  *
720  * Example:
721  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
722  *
723  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the resulting
724  * @post_disable order would be,
725  * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
726  *
727  * Note: the bridge passed should be the one closest to the encoder
728  */
729 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
730 					  struct drm_atomic_state *state)
731 {
732 	struct drm_encoder *encoder;
733 	struct drm_bridge *next, *limit;
734 
735 	if (!bridge)
736 		return;
737 
738 	encoder = bridge->encoder;
739 
740 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
741 		limit = NULL;
742 
743 		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
744 			next = list_next_entry(bridge, chain_node);
745 
746 			if (next->pre_enable_prev_first) {
747 				/* next bridge had requested that prev
748 				 * was enabled first, so disabled last
749 				 */
750 				limit = next;
751 
752 				/* Find the next bridge that has NOT requested
753 				 * prev to be enabled first / disabled last
754 				 */
755 				list_for_each_entry_from(next, &encoder->bridge_chain,
756 							 chain_node) {
757 					if (!next->pre_enable_prev_first) {
758 						next = list_prev_entry(next, chain_node);
759 						limit = next;
760 						break;
761 					}
762 
763 					if (list_is_last(&next->chain_node,
764 							 &encoder->bridge_chain)) {
765 						limit = next;
766 						break;
767 					}
768 				}
769 
770 				/* Call these bridges in reverse order */
771 				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
772 								 chain_node) {
773 					if (next == bridge)
774 						break;
775 
776 					drm_atomic_bridge_call_post_disable(next,
777 									    state);
778 				}
779 			}
780 		}
781 
782 		drm_atomic_bridge_call_post_disable(bridge, state);
783 
784 		if (limit)
785 			/* Jump all bridges that we have already post_disabled */
786 			bridge = limit;
787 	}
788 }
789 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
790 
791 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
792 					      struct drm_atomic_state *state)
793 {
794 	if (state && bridge->funcs->atomic_pre_enable)
795 		bridge->funcs->atomic_pre_enable(bridge, state);
796 	else if (bridge->funcs->pre_enable)
797 		bridge->funcs->pre_enable(bridge);
798 }
799 
800 /**
801  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
802  *					the encoder chain
803  * @bridge: bridge control structure
804  * @state: atomic state being committed
805  *
806  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
807  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
808  * starting from the last bridge to the first. These are called before calling
809  * &drm_encoder_helper_funcs.atomic_enable
810  *
811  * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
812  * prev bridge will be called before pre_enable of this bridge.
813  *
814  * Example:
815  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
816  *
817  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the resulting
818  * @pre_enable order would be,
819  * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
820  *
821  * Note: the bridge passed should be the one closest to the encoder
822  */
823 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
824 					struct drm_atomic_state *state)
825 {
826 	struct drm_encoder *encoder;
827 	struct drm_bridge *iter, *next, *limit;
828 
829 	if (!bridge)
830 		return;
831 
832 	encoder = bridge->encoder;
833 
834 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
835 		if (iter->pre_enable_prev_first) {
836 			next = iter;
837 			limit = bridge;
838 			list_for_each_entry_from_reverse(next,
839 							 &encoder->bridge_chain,
840 							 chain_node) {
841 				if (next == bridge)
842 					break;
843 
844 				if (!next->pre_enable_prev_first) {
845 					/* Found first bridge that does NOT
846 					 * request prev to be enabled first
847 					 */
848 					limit = next;
849 					break;
850 				}
851 			}
852 
853 			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
854 				/* Call requested prev bridge pre_enable
855 				 * in order.
856 				 */
857 				if (next == iter)
858 					/* At the first bridge to request prev
859 					 * bridges called first.
860 					 */
861 					break;
862 
863 				drm_atomic_bridge_call_pre_enable(next, state);
864 			}
865 		}
866 
867 		drm_atomic_bridge_call_pre_enable(iter, state);
868 
869 		if (iter->pre_enable_prev_first)
870 			/* Jump all bridges that we have already pre_enabled */
871 			iter = limit;
872 
873 		if (iter == bridge)
874 			break;
875 	}
876 }
877 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
878 
879 /**
880  * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
881  * @bridge: bridge control structure
882  * @state: atomic state being committed
883  *
884  * Calls &drm_bridge_funcs.atomic_enable (falls back on
885  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
886  * starting from the first bridge to the last. These are called after completing
887  * &drm_encoder_helper_funcs.atomic_enable
888  *
889  * Note: the bridge passed should be the one closest to the encoder
890  */
891 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
892 				    struct drm_atomic_state *state)
893 {
894 	struct drm_encoder *encoder;
895 
896 	if (!bridge)
897 		return;
898 
899 	encoder = bridge->encoder;
900 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
901 		if (bridge->funcs->atomic_enable) {
902 			bridge->funcs->atomic_enable(bridge, state);
903 		} else if (bridge->funcs->enable) {
904 			bridge->funcs->enable(bridge);
905 		}
906 	}
907 }
908 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
909 
910 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
911 				   struct drm_crtc_state *crtc_state,
912 				   struct drm_connector_state *conn_state)
913 {
914 	if (bridge->funcs->atomic_check) {
915 		struct drm_bridge_state *bridge_state;
916 		int ret;
917 
918 		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
919 							       bridge);
920 		if (WARN_ON(!bridge_state))
921 			return -EINVAL;
922 
923 		ret = bridge->funcs->atomic_check(bridge, bridge_state,
924 						  crtc_state, conn_state);
925 		if (ret)
926 			return ret;
927 	} else if (bridge->funcs->mode_fixup) {
928 		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
929 					       &crtc_state->adjusted_mode))
930 			return -EINVAL;
931 	}
932 
933 	return 0;
934 }
935 
936 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
937 				    struct drm_bridge *cur_bridge,
938 				    struct drm_crtc_state *crtc_state,
939 				    struct drm_connector_state *conn_state,
940 				    u32 out_bus_fmt)
941 {
942 	unsigned int i, num_in_bus_fmts = 0;
943 	struct drm_bridge_state *cur_state;
944 	struct drm_bridge *prev_bridge;
945 	u32 *in_bus_fmts;
946 	int ret;
947 
948 	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
949 	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
950 						    cur_bridge);
951 
952 	/*
953 	 * If bus format negotiation is not supported by this bridge, let's
954 	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
955 	 * hope that it can handle this situation gracefully (by providing
956 	 * appropriate default values).
957 	 */
958 	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
959 		if (cur_bridge != first_bridge) {
960 			ret = select_bus_fmt_recursive(first_bridge,
961 						       prev_bridge, crtc_state,
962 						       conn_state,
963 						       MEDIA_BUS_FMT_FIXED);
964 			if (ret)
965 				return ret;
966 		}
967 
968 		/*
969 		 * Driver does not implement the atomic state hooks, but that's
970 		 * fine, as long as it does not access the bridge state.
971 		 */
972 		if (cur_state) {
973 			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
974 			cur_state->output_bus_cfg.format = out_bus_fmt;
975 		}
976 
977 		return 0;
978 	}
979 
980 	/*
981 	 * If the driver implements ->atomic_get_input_bus_fmts() it
982 	 * should also implement the atomic state hooks.
983 	 */
984 	if (WARN_ON(!cur_state))
985 		return -EINVAL;
986 
987 	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
988 							cur_state,
989 							crtc_state,
990 							conn_state,
991 							out_bus_fmt,
992 							&num_in_bus_fmts);
993 	if (!num_in_bus_fmts)
994 		return -ENOTSUPP;
995 	else if (!in_bus_fmts)
996 		return -ENOMEM;
997 
998 	if (first_bridge == cur_bridge) {
999 		cur_state->input_bus_cfg.format = in_bus_fmts[0];
1000 		cur_state->output_bus_cfg.format = out_bus_fmt;
1001 		kfree(in_bus_fmts);
1002 		return 0;
1003 	}
1004 
1005 	for (i = 0; i < num_in_bus_fmts; i++) {
1006 		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
1007 					       crtc_state, conn_state,
1008 					       in_bus_fmts[i]);
1009 		if (ret != -ENOTSUPP)
1010 			break;
1011 	}
1012 
1013 	if (!ret) {
1014 		cur_state->input_bus_cfg.format = in_bus_fmts[i];
1015 		cur_state->output_bus_cfg.format = out_bus_fmt;
1016 	}
1017 
1018 	kfree(in_bus_fmts);
1019 	return ret;
1020 }
1021 
1022 /*
1023  * This function is called by &drm_atomic_bridge_chain_check() just before
1024  * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
1025  * It performs bus format negotiation between bridge elements. The negotiation
1026  * happens in reverse order, starting from the last element in the chain up to
1027  * @bridge.
1028  *
1029  * Negotiation starts by retrieving supported output bus formats on the last
1030  * bridge element and testing them one by one. The test is recursive, meaning
1031  * that for each tested output format, the whole chain will be walked backward,
1032  * and each element will have to choose an input bus format that can be
1033  * transcoded to the requested output format. When a bridge element does not
1034  * support transcoding into a specific output format -ENOTSUPP is returned and
1035  * the next bridge element will have to try a different format. If none of the
1036  * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
1037  *
1038  * This implementation is relying on
1039  * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
1040  * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
1041  * input/output formats.
1042  *
1043  * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
1044  * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
1045  * tries a single format: &drm_connector.display_info.bus_formats[0] if
1046  * available, MEDIA_BUS_FMT_FIXED otherwise.
1047  *
1048  * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
1049  * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
1050  * bridge element that lacks this hook and asks the previous element in the
1051  * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
1052  * to do in that case (fail if they want to enforce bus format negotiation, or
1053  * provide a reasonable default if they need to support pipelines where not
1054  * all elements support bus format negotiation).
1055  */
1056 static int
1057 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
1058 					struct drm_crtc_state *crtc_state,
1059 					struct drm_connector_state *conn_state)
1060 {
1061 	struct drm_connector *conn = conn_state->connector;
1062 	struct drm_encoder *encoder = bridge->encoder;
1063 	struct drm_bridge_state *last_bridge_state;
1064 	unsigned int i, num_out_bus_fmts = 0;
1065 	struct drm_bridge *last_bridge;
1066 	u32 *out_bus_fmts;
1067 	int ret = 0;
1068 
1069 	last_bridge = list_last_entry(&encoder->bridge_chain,
1070 				      struct drm_bridge, chain_node);
1071 	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1072 							    last_bridge);
1073 
1074 	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
1075 		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
1076 
1077 		/*
1078 		 * If the driver implements ->atomic_get_output_bus_fmts() it
1079 		 * should also implement the atomic state hooks.
1080 		 */
1081 		if (WARN_ON(!last_bridge_state))
1082 			return -EINVAL;
1083 
1084 		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
1085 							last_bridge_state,
1086 							crtc_state,
1087 							conn_state,
1088 							&num_out_bus_fmts);
1089 		if (!num_out_bus_fmts)
1090 			return -ENOTSUPP;
1091 		else if (!out_bus_fmts)
1092 			return -ENOMEM;
1093 	} else {
1094 		num_out_bus_fmts = 1;
1095 		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
1096 		if (!out_bus_fmts)
1097 			return -ENOMEM;
1098 
1099 		if (conn->display_info.num_bus_formats &&
1100 		    conn->display_info.bus_formats)
1101 			out_bus_fmts[0] = conn->display_info.bus_formats[0];
1102 		else
1103 			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
1104 	}
1105 
1106 	for (i = 0; i < num_out_bus_fmts; i++) {
1107 		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1108 					       conn_state, out_bus_fmts[i]);
1109 		if (ret != -ENOTSUPP)
1110 			break;
1111 	}
1112 
1113 	kfree(out_bus_fmts);
1114 
1115 	return ret;
1116 }
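
/*
 * Bridge-side counterpart to the negotiation above, as a hypothetical sketch
 * (bar_* names are made up): a bridge that can output RGB888 only, and
 * accepts two input formats reported in order of preference.
 *
 *	static u32 *bar_get_input_bus_fmts(struct drm_bridge *bridge,
 *					   struct drm_bridge_state *bridge_state,
 *					   struct drm_crtc_state *crtc_state,
 *					   struct drm_connector_state *conn_state,
 *					   u32 output_fmt,
 *					   unsigned int *num_input_fmts)
 *	{
 *		u32 *input_fmts;
 *
 *		*num_input_fmts = 0;
 *
 *		if (output_fmt != MEDIA_BUS_FMT_RGB888_1X24)
 *			return NULL;
 *
 *		input_fmts = kcalloc(2, sizeof(*input_fmts), GFP_KERNEL);
 *		if (!input_fmts)
 *			return NULL;
 *
 *		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *		input_fmts[1] = MEDIA_BUS_FMT_RGB666_1X18;
 *		*num_input_fmts = 2;
 *
 *		return input_fmts;
 *	}
 */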
1117 
1118 static void
1119 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1120 				      struct drm_connector *conn,
1121 				      struct drm_atomic_state *state)
1122 {
1123 	struct drm_bridge_state *bridge_state, *next_bridge_state;
1124 	struct drm_bridge *next_bridge;
1125 	u32 output_flags = 0;
1126 
1127 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1128 
1129 	/* No bridge state attached to this bridge => nothing to propagate. */
1130 	if (!bridge_state)
1131 		return;
1132 
1133 	next_bridge = drm_bridge_get_next_bridge(bridge);
1134 
1135 	/*
1136 	 * Let's try to apply the most common case here, that is, propagate
1137 	 * display_info flags for the last bridge, and propagate the input
1138 	 * flags of the next bridge element to the output end of the current
1139 	 * bridge when the bridge is not the last one.
1140 	 * There are exceptions to this rule, like when signal inversion is
1141 	 * happening at the board level, but that's something drivers can deal
1142 	 * with from their &drm_bridge_funcs.atomic_check() implementation by
1143 	 * simply overriding the flags value we've set here.
1144 	 */
1145 	if (!next_bridge) {
1146 		output_flags = conn->display_info.bus_flags;
1147 	} else {
1148 		next_bridge_state = drm_atomic_get_new_bridge_state(state,
1149 								next_bridge);
1150 		/*
1151 		 * No bridge state attached to the next bridge, just leave the
1152 		 * flags to 0.
1153 		 */
1154 		if (next_bridge_state)
1155 			output_flags = next_bridge_state->input_bus_cfg.flags;
1156 	}
1157 
1158 	bridge_state->output_bus_cfg.flags = output_flags;
1159 
1160 	/*
1161 	 * Propagate the output flags to the input end of the bridge. Again, it's
1162 	 * not necessarily what all bridges want, but that's what most of them
1163 	 * do, and by doing that by default we avoid forcing drivers to
1164 	 * duplicate the "dummy propagation" logic.
1165 	 */
1166 	bridge_state->input_bus_cfg.flags = output_flags;
1167 }
1168 
1169 /**
1170  * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1171  * @bridge: bridge control structure
1172  * @crtc_state: new CRTC state
1173  * @conn_state: new connector state
1174  *
1175  * First trigger a bus format negotiation before calling
1176  * &drm_bridge_funcs.atomic_check() (falls back on
1177  * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1178  * starting from the last bridge to the first. These are called before calling
1179  * &drm_encoder_helper_funcs.atomic_check()
1180  *
1181  * RETURNS:
1182  * 0 on success, a negative error code on failure
1183  */
1184 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1185 				  struct drm_crtc_state *crtc_state,
1186 				  struct drm_connector_state *conn_state)
1187 {
1188 	struct drm_connector *conn = conn_state->connector;
1189 	struct drm_encoder *encoder;
1190 	struct drm_bridge *iter;
1191 	int ret;
1192 
1193 	if (!bridge)
1194 		return 0;
1195 
1196 	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1197 						      conn_state);
1198 	if (ret)
1199 		return ret;
1200 
1201 	encoder = bridge->encoder;
1202 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1203 		int ret;
1204 
1205 		/*
1206 		 * Bus flags are propagated by default. If a bridge needs to
1207 		 * tweak the input bus flags for any reason, it should happen
1208 		 * in its &drm_bridge_funcs.atomic_check() implementation such
1209 		 * that preceding bridges in the chain can propagate the new
1210 		 * bus flags.
1211 		 */
1212 		drm_atomic_bridge_propagate_bus_flags(iter, conn,
1213 						      crtc_state->state);
1214 
1215 		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1216 		if (ret)
1217 			return ret;
1218 
1219 		if (iter == bridge)
1220 			break;
1221 	}
1222 
1223 	return 0;
1224 }
1225 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1226 
1227 /**
1228  * drm_bridge_detect - check if anything is attached to the bridge output
1229  * @bridge: bridge control structure
1230  *
1231  * If the bridge supports output detection, as reported by the
1232  * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1233  * bridge and return the connection status. Otherwise return
1234  * connector_status_unknown.
1235  *
1236  * RETURNS:
1237  * The detection status on success, or connector_status_unknown if the bridge
1238  * doesn't support output detection.
1239  */
1240 enum drm_connector_status
1241 drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
1242 {
1243 	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1244 		return connector_status_unknown;
1245 
1246 	return bridge->funcs->detect(bridge, connector);
1247 }
1248 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1249 
1250 /**
1251  * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1252  * @connector
1253  * @bridge: bridge control structure
1254  * @connector: the connector to fill with modes
1255  *
1256  * If the bridge supports output modes retrieval, as reported by the
1257  * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1258  * fill the connector with all valid modes and return the number of modes
1259  * added. Otherwise return 0.
1260  *
1261  * RETURNS:
1262  * The number of modes added to the connector.
1263  */
1264 int drm_bridge_get_modes(struct drm_bridge *bridge,
1265 			 struct drm_connector *connector)
1266 {
1267 	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1268 		return 0;
1269 
1270 	return bridge->funcs->get_modes(bridge, connector);
1271 }
1272 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1273 
1274 /**
1275  * drm_bridge_edid_read - read the EDID data of the connected display
1276  * @bridge: bridge control structure
1277  * @connector: the connector to read EDID for
1278  *
1279  * If the bridge supports output EDID retrieval, as reported by the
1280  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1281  * the EDID and return it. Otherwise return NULL.
1282  *
1283  * RETURNS:
1284  * The retrieved EDID on success, or NULL otherwise.
1285  */
1286 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1287 					    struct drm_connector *connector)
1288 {
1289 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1290 		return NULL;
1291 
1292 	return bridge->funcs->edid_read(bridge, connector);
1293 }
1294 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1295 
1296 /**
1297  * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1298  * @bridge: bridge control structure
1299  * @cb: hot-plug detection callback
1300  * @data: data to be passed to the hot-plug detection callback
1301  *
1302  * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1303  * and @data as hot plug notification callback. From now on the @cb will be
1304  * called with @data when an output status change is detected by the bridge,
1305  * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1306  *
1307  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1308  * bridge->ops. This function shall not be called when the flag is not set.
1309  *
1310  * Only one hot plug detection callback can be registered at a time, it is an
1311  * error to call this function when hot plug detection is already enabled for
1312  * the bridge.
1313  */
1314 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1315 			   void (*cb)(void *data,
1316 				      enum drm_connector_status status),
1317 			   void *data)
1318 {
1319 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1320 		return;
1321 
1322 	mutex_lock(&bridge->hpd_mutex);
1323 
1324 	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1325 		goto unlock;
1326 
1327 	bridge->hpd_cb = cb;
1328 	bridge->hpd_data = data;
1329 
1330 	if (bridge->funcs->hpd_enable)
1331 		bridge->funcs->hpd_enable(bridge);
1332 
1333 unlock:
1334 	mutex_unlock(&bridge->hpd_mutex);
1335 }
1336 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1337 
1338 /**
1339  * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1340  * @bridge: bridge control structure
1341  *
1342  * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1343  * plug detection callback previously registered with drm_bridge_hpd_enable().
1344  * Once this function returns the callback will not be called by the bridge
1345  * when an output status change occurs.
1346  *
1347  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1348  * bridge->ops. This function shall not be called when the flag is not set.
1349  */
1350 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1351 {
1352 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1353 		return;
1354 
1355 	mutex_lock(&bridge->hpd_mutex);
1356 	if (bridge->funcs->hpd_disable)
1357 		bridge->funcs->hpd_disable(bridge);
1358 
1359 	bridge->hpd_cb = NULL;
1360 	bridge->hpd_data = NULL;
1361 	mutex_unlock(&bridge->hpd_mutex);
1362 }
1363 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1364 
1365 /**
1366  * drm_bridge_hpd_notify - notify hot plug detection events
1367  * @bridge: bridge control structure
1368  * @status: output connection status
1369  *
1370  * Bridge drivers shall call this function to report hot plug events when they
1371  * detect a change in the output status, when hot plug detection has been
1372  * enabled by drm_bridge_hpd_enable().
1373  *
1374  * This function shall be called in a context that can sleep.
1375  */
1376 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1377 			   enum drm_connector_status status)
1378 {
1379 	mutex_lock(&bridge->hpd_mutex);
1380 	if (bridge->hpd_cb)
1381 		bridge->hpd_cb(bridge->hpd_data, status);
1382 	mutex_unlock(&bridge->hpd_mutex);
1383 }
1384 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
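
/*
 * How a bridge driver typically feeds this, as a hypothetical sketch (bar_*
 * names are made up): a threaded hot-plug interrupt handler reports the new
 * status, while &drm_bridge_funcs.hpd_enable / .hpd_disable gate the interrupt.
 *
 *	static irqreturn_t bar_hpd_irq_thread(int irq, void *arg)
 *	{
 *		struct bar_bridge *bar = arg;
 *		enum drm_connector_status status;
 *
 *		status = bar_hpd_level(bar) ? connector_status_connected
 *					    : connector_status_disconnected;
 *		drm_bridge_hpd_notify(&bar->bridge, status);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void bar_hpd_enable(struct drm_bridge *bridge)
 *	{
 *		struct bar_bridge *bar = to_bar_bridge(bridge);
 *
 *		enable_irq(bar->hpd_irq);
 *	}
 */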
1385 
1386 #ifdef CONFIG_OF
1387 /**
1388  * of_drm_find_bridge - find the bridge corresponding to the device node in
1389  *			the global bridge list
1390  *
1391  * @np: device node
1392  *
1393  * RETURNS:
1394  * drm_bridge control struct on success, NULL on failure
1395  */
1396 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1397 {
1398 	struct drm_bridge *bridge;
1399 
1400 	mutex_lock(&bridge_lock);
1401 
1402 	list_for_each_entry(bridge, &bridge_list, list) {
1403 		if (bridge->of_node == np) {
1404 			mutex_unlock(&bridge_lock);
1405 			return bridge;
1406 		}
1407 	}
1408 
1409 	mutex_unlock(&bridge_lock);
1410 	return NULL;
1411 }
1412 EXPORT_SYMBOL(of_drm_find_bridge);
1413 #endif
1414 
1415 /**
1416  * devm_drm_put_bridge - Release a bridge reference obtained via devm
1417  * @dev: device that got the bridge via devm
1418  * @bridge: pointer to a struct drm_bridge obtained via devm
1419  *
1420  * Same as drm_bridge_put() for bridge pointers obtained via devm functions
1421  * such as devm_drm_bridge_alloc().
1422  *
1423  * This function is a temporary workaround and MUST NOT be used. Manual
1424  * handling of bridge lifetime is inherently unsafe.
1425  */
1426 void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
1427 {
1428 	devm_release_action(dev, drm_bridge_put_void, bridge);
1429 }
1430 EXPORT_SYMBOL(devm_drm_put_bridge);
1431 
1432 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
1433 					   struct drm_bridge *bridge,
1434 					   unsigned int idx)
1435 {
1436 	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
1437 	drm_printf(p, "\ttype: [%d] %s\n",
1438 		   bridge->type,
1439 		   drm_get_connector_type_name(bridge->type));
1440 
1441 	if (bridge->of_node)
1442 		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
1443 
1444 	drm_printf(p, "\tops: [0x%x]", bridge->ops);
1445 	if (bridge->ops & DRM_BRIDGE_OP_DETECT)
1446 		drm_puts(p, " detect");
1447 	if (bridge->ops & DRM_BRIDGE_OP_EDID)
1448 		drm_puts(p, " edid");
1449 	if (bridge->ops & DRM_BRIDGE_OP_HPD)
1450 		drm_puts(p, " hpd");
1451 	if (bridge->ops & DRM_BRIDGE_OP_MODES)
1452 		drm_puts(p, " modes");
1453 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
1454 		drm_puts(p, " hdmi");
1455 	drm_puts(p, "\n");
1456 }
1457 
1458 static int allbridges_show(struct seq_file *m, void *data)
1459 {
1460 	struct drm_printer p = drm_seq_file_printer(m);
1461 	struct drm_bridge *bridge;
1462 	unsigned int idx = 0;
1463 
1464 	mutex_lock(&bridge_lock);
1465 
1466 	list_for_each_entry(bridge, &bridge_list, list)
1467 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
1468 
1469 	mutex_unlock(&bridge_lock);
1470 
1471 	return 0;
1472 }
1473 DEFINE_SHOW_ATTRIBUTE(allbridges);
1474 
1475 static int encoder_bridges_show(struct seq_file *m, void *data)
1476 {
1477 	struct drm_encoder *encoder = m->private;
1478 	struct drm_printer p = drm_seq_file_printer(m);
1479 	struct drm_bridge *bridge;
1480 	unsigned int idx = 0;
1481 
1482 	drm_for_each_bridge_in_chain(encoder, bridge)
1483 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
1484 
1485 	return 0;
1486 }
1487 DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1488 
1489 void drm_bridge_debugfs_params(struct dentry *root)
1490 {
1491 	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
1492 }
1493 
1494 void drm_bridge_debugfs_encoder_params(struct dentry *root,
1495 				       struct drm_encoder *encoder)
1496 {
1497 	/* bridges list */
1498 	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
1499 }
1500 
1501 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1502 MODULE_DESCRIPTION("DRM bridge infrastructure");
1503 MODULE_LICENSE("GPL and additional rights");
1504