1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24 
25 #include "dm_services.h"
26 
27 #include "amdgpu.h"
28 
29 #include "dc.h"
30 
31 #include "core_status.h"
32 #include "core_types.h"
33 #include "hw_sequencer.h"
34 #include "dce/dce_hwseq.h"
35 
36 #include "resource.h"
37 #include "dc_state.h"
38 #include "dc_state_priv.h"
39 #include "dc_plane_priv.h"
40 
41 #include "gpio_service_interface.h"
42 #include "clk_mgr.h"
43 #include "clock_source.h"
44 #include "dc_bios_types.h"
45 
46 #include "bios_parser_interface.h"
47 #include "bios/bios_parser_helper.h"
48 #include "include/irq_service_interface.h"
49 #include "transform.h"
50 #include "dmcu.h"
51 #include "dpp.h"
52 #include "timing_generator.h"
53 #include "abm.h"
54 #include "virtual/virtual_link_encoder.h"
55 #include "hubp.h"
56 
57 #include "link_hwss.h"
58 #include "link_encoder.h"
59 #include "link_enc_cfg.h"
60 
61 #include "link.h"
62 #include "dm_helpers.h"
63 #include "mem_input.h"
64 
65 #include "dc_dmub_srv.h"
66 
67 #include "dsc.h"
68 
69 #include "vm_helper.h"
70 
71 #include "dce/dce_i2c.h"
72 
73 #include "dmub/dmub_srv.h"
74 
75 #include "dce/dmub_psr.h"
76 
77 #include "dce/dmub_hw_lock_mgr.h"
78 
79 #include "dc_trace.h"
80 
81 #include "hw_sequencer_private.h"
82 
83 #if defined(CONFIG_DRM_AMD_DC_FP)
84 #include "dml2/dml2_internal_types.h"
85 #endif
86 
87 #include "dce/dmub_outbox.h"
88 
89 #define CTX \
90 	dc->ctx
91 
92 #define DC_LOGGER \
93 	dc->ctx->logger
94 
95 static const char DC_BUILD_ID[] = "production-build";
96 
97 /**
98  * DOC: Overview
99  *
100  * DC is the OS-agnostic component of the amdgpu DC driver.
101  *
102  * DC maintains and validates a set of structs representing the state of the
103  * driver and writes that state to AMD hardware.
104  *
105  * Main DC HW structs:
106  *
107  * struct dc - The central struct.  One per driver.  Created on driver load,
108  * destroyed on driver unload.
109  *
110  * struct dc_context - One per driver.
111  * Used as a backpointer by most other structs in dc.
112  *
113  * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
114  * plugpoints).  Created on driver load, destroyed on driver unload.
115  *
116  * struct dc_sink - One per display.  Created on boot or hotplug.
117  * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
118  * (the display directly attached).  It may also have one or more remote
119  * sinks (in the Multi-Stream Transport case).
120  *
121  * struct resource_pool - One per driver.  Represents the hw blocks not in the
122  * main pipeline.  Not directly accessible by dm.
123  *
124  * Main dc state structs:
125  *
126  * These structs can be created and destroyed as needed.  There is a full set of
127  * these structs in dc->current_state representing the currently programmed state.
128  *
129  * struct dc_state - The global DC state to track global state information,
130  * such as bandwidth values.
131  *
132  * struct dc_stream_state - Represents the hw configuration for the pipeline from
133  * a framebuffer to a display.  Maps one-to-one with dc_sink.
134  *
135  * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
136  * and may have more in the Multi-Plane Overlay case.
137  *
138  * struct resource_context - Represents the programmable state of everything in
139  * the resource_pool.  Not directly accessible by dm.
140  *
141  * struct pipe_ctx - A member of struct resource_context.  Represents the
142  * internal hardware pipeline components.  Each dc_plane_state has either
143  * one or two (in the pipe-split case).
144  */
145 
146 /* Private functions */
147 
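/* Promote *original to the new update type if the new one is more invasive. */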
148 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
149 {
150 	if (new > *original)
151 		*original = new;
152 }
153 
154 static void destroy_links(struct dc *dc)
155 {
156 	uint32_t i;
157 
158 	for (i = 0; i < dc->link_count; i++) {
159 		if (NULL != dc->links[i])
160 			dc->link_srv->destroy_link(&dc->links[i]);
161 	}
162 }
163 
164 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
165 {
166 	int i;
167 	uint32_t count = 0;
168 
169 	for (i = 0; i < num_links; i++) {
170 		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
171 				links[i]->is_internal_display)
172 			count++;
173 	}
174 
175 	return count;
176 }
177 
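/* Count the streams in @ctx that have the seamless boot optimization flag set. */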
178 static int get_seamless_boot_stream_count(struct dc_state *ctx)
179 {
180 	uint8_t i;
181 	uint8_t seamless_boot_stream_count = 0;
182 
183 	for (i = 0; i < ctx->stream_count; i++)
184 		if (ctx->streams[i]->apply_seamless_boot_optimization)
185 			seamless_boot_stream_count++;
186 
187 	return seamless_boot_stream_count;
188 }
189 
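/* Create dc_link objects for every physical connector reported by the BIOS,
 * every USB4 DPIA port in the resource pool, and every requested virtual link.
 */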
190 static bool create_links(
191 		struct dc *dc,
192 		uint32_t num_virtual_links)
193 {
194 	int i;
195 	int connectors_num;
196 	struct dc_bios *bios = dc->ctx->dc_bios;
197 
198 	dc->link_count = 0;
199 
200 	connectors_num = bios->funcs->get_connectors_number(bios);
201 
202 	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
203 
204 	if (connectors_num > ENUM_ID_COUNT) {
205 		dm_error(
206 			"DC: Number of connectors %d exceeds maximum of %d!\n",
207 			connectors_num,
208 			ENUM_ID_COUNT);
209 		return false;
210 	}
211 
212 	dm_output_to_console(
213 		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
214 		__func__,
215 		connectors_num,
216 		num_virtual_links);
217 
218 	// condition loop on link_count to allow skipping invalid indices
219 	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
220 		struct link_init_data link_init_params = {0};
221 		struct dc_link *link;
222 
223 		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
224 
225 		link_init_params.ctx = dc->ctx;
226 		/* next BIOS object table connector */
227 		link_init_params.connector_index = i;
228 		link_init_params.link_index = dc->link_count;
229 		link_init_params.dc = dc;
230 		link = dc->link_srv->create_link(&link_init_params);
231 
232 		if (link) {
233 			dc->links[dc->link_count] = link;
234 			link->dc = dc;
235 			++dc->link_count;
236 		}
237 	}
238 
239 	DC_LOG_DC("BIOS object table - end");
240 
241 	/* Create a link for each usb4 dpia port */
242 	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
243 		struct link_init_data link_init_params = {0};
244 		struct dc_link *link;
245 
246 		link_init_params.ctx = dc->ctx;
247 		link_init_params.connector_index = i;
248 		link_init_params.link_index = dc->link_count;
249 		link_init_params.dc = dc;
250 		link_init_params.is_dpia_link = true;
251 
252 		link = dc->link_srv->create_link(&link_init_params);
253 		if (link) {
254 			dc->links[dc->link_count] = link;
255 			link->dc = dc;
256 			++dc->link_count;
257 		}
258 	}
259 
260 	for (i = 0; i < num_virtual_links; i++) {
261 		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
262 		struct encoder_init_data enc_init = {0};
263 
264 		if (link == NULL) {
265 			BREAK_TO_DEBUGGER();
266 			goto failed_alloc;
267 		}
268 
269 		link->link_index = dc->link_count;
270 		dc->links[dc->link_count] = link;
271 		dc->link_count++;
272 
273 		link->ctx = dc->ctx;
274 		link->dc = dc;
275 		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
276 		link->link_id.type = OBJECT_TYPE_CONNECTOR;
277 		link->link_id.id = CONNECTOR_ID_VIRTUAL;
278 		link->link_id.enum_id = ENUM_ID_1;
279 		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
280 		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
281 
282 		if (!link->link_enc) {
283 			BREAK_TO_DEBUGGER();
284 			goto failed_alloc;
285 		}
286 
287 		link->link_status.dpcd_caps = &link->dpcd_caps;
288 
289 		enc_init.ctx = dc->ctx;
290 		enc_init.channel = CHANNEL_ID_UNKNOWN;
291 		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
292 		enc_init.transmitter = TRANSMITTER_UNKNOWN;
293 		enc_init.connector = link->link_id;
294 		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
295 		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
296 		enc_init.encoder.enum_id = ENUM_ID_1;
297 		virtual_link_encoder_construct(link->link_enc, &enc_init);
298 	}
299 
300 	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
301 
302 	return true;
303 
304 failed_alloc:
305 	return false;
306 }
307 
308 /* Create additional DIG link encoder objects if fewer than the platform
309  * supports were created during link construction. This can happen if the
310  * number of physical connectors is less than the number of DIGs.
311  */
312 static bool create_link_encoders(struct dc *dc)
313 {
314 	bool res = true;
315 	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
316 	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
317 	int i;
318 
319 	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
320 	 * link encoders and physical display endpoints and does not require
321 	 * additional link encoder objects.
322 	 */
323 	if (num_usb4_dpia == 0)
324 		return res;
325 
326 	/* Create as many link encoder objects as the platform supports. DPIA
327 	 * endpoints can be programmably mapped to any DIG.
328 	 */
329 	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
330 		for (i = 0; i < num_dig_link_enc; i++) {
331 			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
332 
333 			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
334 				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
335 						(enum engine_id)(ENGINE_ID_DIGA + i));
336 				if (link_enc) {
337 					dc->res_pool->link_encoders[i] = link_enc;
338 					dc->res_pool->dig_link_enc_count++;
339 				} else {
340 					res = false;
341 				}
342 			}
343 		}
344 	}
345 
346 	return res;
347 }
348 
349 /* Destroy any additional DIG link encoder objects created by
350  * create_link_encoders().
351  * NB: Must only be called after destroy_links().
352  */
353 static void destroy_link_encoders(struct dc *dc)
354 {
355 	unsigned int num_usb4_dpia;
356 	unsigned int num_dig_link_enc;
357 	int i;
358 
359 	if (!dc->res_pool)
360 		return;
361 
362 	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
363 	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
364 
365 	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
366 	 * link encoders and physical display endpoints and does not require
367 	 * additional link encoder objects.
368 	 */
369 	if (num_usb4_dpia == 0)
370 		return;
371 
372 	for (i = 0; i < num_dig_link_enc; i++) {
373 		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
374 
375 		if (link_enc) {
376 			link_enc->funcs->destroy(&link_enc);
377 			dc->res_pool->link_encoders[i] = NULL;
378 			dc->res_pool->dig_link_enc_count--;
379 		}
380 	}
381 }
382 
383 static struct dc_perf_trace *dc_perf_trace_create(void)
384 {
385 	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
386 }
387 
388 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
389 {
390 	kfree(*perf_trace);
391 	*perf_trace = NULL;
392 }
393 
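/* Find the pipe driving @stream and, if the hw sequencer supports it, program
 * the extended (long) vtotal range requested in @adjust.
 */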
394 static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
395 {
396 	if (!dc || !stream || !adjust)
397 		return false;
398 
399 	if (!dc->current_state)
400 		return false;
401 
402 	int i;
403 
404 	for (i = 0; i < MAX_PIPES; i++) {
405 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
406 
407 		if (pipe->stream == stream && pipe->stream_res.tg) {
408 			if (dc->hwss.set_long_vtotal)
409 				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
410 
411 			return true;
412 		}
413 	}
414 
415 	return false;
416 }
417 
418 /**
419  *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
420  *  @dc:     dc reference
421  *  @stream: Initial dc stream state
422  *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
423  *
424  *  Looks up the pipe context of dc_stream_state and updates the
425  *  vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
426  *  Rate), a power-saving feature that reduces the panel refresh rate
427  *  while the screen is static.
428  *
429  *  Return: %true if the pipe context is found and adjusted;
430  *          %false if the pipe context is not found.
431  */
432 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
433 		struct dc_stream_state *stream,
434 		struct dc_crtc_timing_adjust *adjust)
435 {
436 	int i;
437 
438 	/*
439 	 * Don't adjust DRR while there are bandwidth optimizations pending, to
440 	 * avoid conflicting with firmware updates.
441 	 */
442 	if (dc->ctx->dce_version > DCE_VERSION_MAX) {
443 		if (dc->optimized_required || dc->wm_optimized_required) {
444 			stream->adjust.timing_adjust_pending = true;
445 			return false;
446 		}
447 	}
448 
449 	dc_exit_ips_for_hw_access(dc);
450 
451 	stream->adjust.v_total_max = adjust->v_total_max;
452 	stream->adjust.v_total_mid = adjust->v_total_mid;
453 	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
454 	stream->adjust.v_total_min = adjust->v_total_min;
455 	stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;
456 
457 	if (dc->caps.max_v_total != 0 &&
458 		(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
459 		stream->adjust.timing_adjust_pending = false;
460 		if (adjust->allow_otg_v_count_halt)
461 			return set_long_vtotal(dc, stream, adjust);
462 		else
463 			return false;
464 	}
465 
466 	for (i = 0; i < MAX_PIPES; i++) {
467 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
468 
469 		if (pipe->stream == stream && pipe->stream_res.tg) {
470 			dc->hwss.set_drr(&pipe,
471 					1,
472 					*adjust);
473 			stream->adjust.timing_adjust_pending = false;
474 			return true;
475 		}
476 	}
477 	return false;
478 }
479 
480 /**
481  * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
482  * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
483  *
484  * @dc: [in] dc reference
485  * @stream: [in] Initial dc stream state
486  * @refresh_rate: [out] last VTOTAL used by the DRR feature
487  *
488  * Return: %true if the pipe context is found and there is an associated
489  *         timing_generator for the DC;
490  *         %false if the pipe context is not found or there is no
491  *         timing_generator for the DC.
492  */
493 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
494 		struct dc_stream_state *stream,
495 		uint32_t *refresh_rate)
496 {
497 	bool status = false;
498 
499 	int i = 0;
500 
501 	dc_exit_ips_for_hw_access(dc);
502 
503 	for (i = 0; i < MAX_PIPES; i++) {
504 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
505 
506 		if (pipe->stream == stream && pipe->stream_res.tg) {
507 			/* Only execute if a function pointer has been defined for
508 			 * the DC version in question
509 			 */
510 			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
511 				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
512 
513 				status = true;
514 
515 				break;
516 			}
517 		}
518 	}
519 
520 	return status;
521 }
522 
523 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
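/* Build a DMUB secure-display command that starts or stops CRC window (ROI)
 * updates for the given OTG/PHY pair and submit it without waiting.
 */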
524 static inline void
525 dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
526 		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
527 {
528 	union dmub_rb_cmd cmd = {0};
529 
530 	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
531 	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;
532 
533 	if (is_stop) {
534 		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
535 		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
536 	} else {
537 		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
538 		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
539 		cmd.secure_display.roi_info.x_start = rect->x;
540 		cmd.secure_display.roi_info.y_start = rect->y;
541 		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
542 		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
543 	}
544 
545 	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
546 }
547 
548 static inline void
549 dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
550 		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
551 {
552 	if (is_stop)
553 		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
554 	else
555 		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
556 }
557 
558 bool
559 dc_stream_forward_crc_window(struct dc_stream_state *stream,
560 		struct rect *rect, uint8_t phy_id, bool is_stop)
561 {
562 	struct dmcu *dmcu;
563 	struct dc_dmub_srv *dmub_srv;
564 	struct otg_phy_mux mux_mapping;
565 	struct pipe_ctx *pipe;
566 	int i;
567 	struct dc *dc = stream->ctx->dc;
568 
569 	for (i = 0; i < MAX_PIPES; i++) {
570 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
571 		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
572 			break;
573 	}
574 
575 	/* Stream not found */
576 	if (i == MAX_PIPES)
577 		return false;
578 
579 	mux_mapping.phy_output_num = phy_id;
580 	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
581 
582 	dmcu = dc->res_pool->dmcu;
583 	dmub_srv = dc->ctx->dmub_srv;
584 
585 	/* forward to dmub */
586 	if (dmub_srv)
587 		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
588 	/* forward to dmcu */
589 	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
590 		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
591 	else
592 		return false;
593 
594 	return true;
595 }
596 
597 static void
598 dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
599 		struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
600 {
601 	int i;
602 	union dmub_rb_cmd cmd = {0};
603 
604 	cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
605 	cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;
606 
607 	cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
608 
609 	if (stop) {
610 		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
611 	} else {
612 		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
613 		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
614 			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
615 			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
616 			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
617 			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
618 			cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
619 		}
620 	}
621 
622 	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
623 }
624 
625 bool
626 dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
627 		struct crc_window *window, uint8_t phy_id, bool stop)
628 {
629 	struct dc_dmub_srv *dmub_srv;
630 	struct otg_phy_mux mux_mapping;
631 	struct pipe_ctx *pipe;
632 	int i;
633 	struct dc *dc = stream->ctx->dc;
634 
635 	for (i = 0; i < MAX_PIPES; i++) {
636 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
637 		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
638 			break;
639 	}
640 
641 	/* Stream not found */
642 	if (i == MAX_PIPES)
643 		return false;
644 
645 	mux_mapping.phy_output_num = phy_id;
646 	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
647 
648 	dmub_srv = dc->ctx->dmub_srv;
649 
650 	/* forward to dmub only; no dmcu support */
651 	if (dmub_srv)
652 		dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
653 	else
654 		return false;
655 
656 	return true;
657 }
658 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
659 
660 /**
661  * dc_stream_configure_crc() - Configure CRC capture for the given stream.
662  * @dc: DC Object
663  * @stream: The stream to configure CRC on.
664  * @crc_window: CRC window (x/y start/end) information
665  * @enable: Enable CRC if true, disable otherwise.
666  * @continuous: Capture CRC on every frame if true. Otherwise, only capture
667  *              once.
668  * @idx: Index of the CRC engine instance to capture CRC on
669  * @reset: Reset CRC engine before the configuration
670  *
671  * By default, the entire frame is used to calculate the CRC.
672  *
673  * Return: %false if the stream is not found or CRC capture is not supported;
674  *         %true if the stream has been configured.
675  */
676 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
677 			     struct crc_params *crc_window, bool enable, bool continuous,
678 			     uint8_t idx, bool reset)
679 {
680 	struct pipe_ctx *pipe;
681 	struct crc_params param;
682 	struct timing_generator *tg;
683 
684 	pipe = resource_get_otg_master_for_stream(
685 			&dc->current_state->res_ctx, stream);
686 
687 	/* Stream not found */
688 	if (pipe == NULL)
689 		return false;
690 
691 	dc_exit_ips_for_hw_access(dc);
692 
693 	/* By default, capture the full frame */
694 	param.windowa_x_start = 0;
695 	param.windowa_y_start = 0;
696 	param.windowa_x_end = pipe->stream->timing.h_addressable;
697 	param.windowa_y_end = pipe->stream->timing.v_addressable;
698 	param.windowb_x_start = 0;
699 	param.windowb_y_start = 0;
700 	param.windowb_x_end = pipe->stream->timing.h_addressable;
701 	param.windowb_y_end = pipe->stream->timing.v_addressable;
702 
703 	if (crc_window) {
704 		param.windowa_x_start = crc_window->windowa_x_start;
705 		param.windowa_y_start = crc_window->windowa_y_start;
706 		param.windowa_x_end = crc_window->windowa_x_end;
707 		param.windowa_y_end = crc_window->windowa_y_end;
708 		param.windowb_x_start = crc_window->windowb_x_start;
709 		param.windowb_y_start = crc_window->windowb_y_start;
710 		param.windowb_x_end = crc_window->windowb_x_end;
711 		param.windowb_y_end = crc_window->windowb_y_end;
712 	}
713 
714 	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
715 	param.odm_mode = pipe->next_odm_pipe ? 1:0;
716 
717 	/* Default to the union of both windows */
718 	param.selection = UNION_WINDOW_A_B;
719 	param.continuous_mode = continuous;
720 	param.enable = enable;
721 
722 	param.crc_eng_inst = idx;
723 	param.reset = reset;
724 
725 	tg = pipe->stream_res.tg;
726 
727 	/* Only call if supported */
728 	if (tg->funcs->configure_crc)
729 		return tg->funcs->configure_crc(tg, &param);
730 	DC_LOG_WARNING("CRC capture not supported.");
731 	return false;
732 }
733 
734 /**
735  * dc_stream_get_crc() - Get CRC values for the given stream.
736  *
737  * @dc: DC object.
738  * @stream: The DC stream state of the stream to get CRCs from.
739  * @idx: index of crc engine to get CRC from
740  * @r_cr: CRC value for the red component.
741  * @g_y:  CRC value for the green component.
742  * @b_cb: CRC value for the blue component.
743  *
744  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
745  *
746  * Return:
747  * %false if stream is not found, or if CRCs are not enabled.
748  */
749 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
750 		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
751 {
752 	int i;
753 	struct pipe_ctx *pipe;
754 	struct timing_generator *tg;
755 
756 	dc_exit_ips_for_hw_access(dc);
757 
758 	for (i = 0; i < MAX_PIPES; i++) {
759 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
760 		if (pipe->stream == stream)
761 			break;
762 	}
763 	/* Stream not found */
764 	if (i == MAX_PIPES)
765 		return false;
766 
767 	tg = pipe->stream_res.tg;
768 
769 	if (tg->funcs->get_crc)
770 		return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);
771 	DC_LOG_WARNING("CRC capture not supported.");
772 	return false;
773 }
774 
775 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
776 		enum dc_dynamic_expansion option)
777 {
778 	/* OPP FMT dyn expansion updates */
779 	int i;
780 	struct pipe_ctx *pipe_ctx;
781 
782 	dc_exit_ips_for_hw_access(dc);
783 
784 	for (i = 0; i < MAX_PIPES; i++) {
785 		if (dc->current_state->res_ctx.pipe_ctx[i].stream
786 				== stream) {
787 			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
788 			pipe_ctx->stream_res.opp->dyn_expansion = option;
789 			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
790 					pipe_ctx->stream_res.opp,
791 					COLOR_SPACE_YCBCR601,
792 					stream->timing.display_color_depth,
793 					stream->signal);
794 		}
795 	}
796 }
797 
798 void dc_stream_set_dither_option(struct dc_stream_state *stream,
799 		enum dc_dither_option option)
800 {
801 	struct bit_depth_reduction_params params;
802 	struct dc_link *link = stream->link;
803 	struct pipe_ctx *pipes = NULL;
804 	int i;
805 
806 	for (i = 0; i < MAX_PIPES; i++) {
807 		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
808 				stream) {
809 			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
810 			break;
811 		}
812 	}
813 
814 	if (!pipes)
815 		return;
816 	if (option > DITHER_OPTION_MAX)
817 		return;
818 
819 	dc_exit_ips_for_hw_access(stream->ctx->dc);
820 
821 	stream->dither_option = option;
822 
823 	memset(&params, 0, sizeof(params));
824 	resource_build_bit_depth_reduction_params(stream, &params);
825 	stream->bit_depth_params = params;
826 
827 	if (pipes->plane_res.xfm &&
828 	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
829 		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
830 			pipes->plane_res.xfm,
831 			pipes->plane_res.scl_data.lb_params.depth,
832 			&stream->bit_depth_params);
833 	}
834 
835 	pipes->stream_res.opp->funcs->
836 		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
837 }
838 
839 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
840 {
841 	int i;
842 	bool ret = false;
843 	struct pipe_ctx *pipes;
844 
845 	dc_exit_ips_for_hw_access(dc);
846 
847 	for (i = 0; i < MAX_PIPES; i++) {
848 		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
849 			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
850 			dc->hwss.program_gamut_remap(pipes);
851 			ret = true;
852 		}
853 	}
854 
855 	return ret;
856 }
857 
858 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
859 {
860 	int i;
861 	bool ret = false;
862 	struct pipe_ctx *pipes;
863 
864 	dc_exit_ips_for_hw_access(dc);
865 
866 	for (i = 0; i < MAX_PIPES; i++) {
867 		if (dc->current_state->res_ctx.pipe_ctx[i].stream
868 				== stream) {
869 
870 			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
871 			dc->hwss.program_output_csc(dc,
872 					pipes,
873 					stream->output_color_space,
874 					stream->csc_color_matrix.matrix,
875 					pipes->stream_res.opp->inst);
876 			ret = true;
877 		}
878 	}
879 
880 	return ret;
881 }
882 
883 void dc_stream_set_static_screen_params(struct dc *dc,
884 		struct dc_stream_state **streams,
885 		int num_streams,
886 		const struct dc_static_screen_params *params)
887 {
888 	int i, j;
889 	struct pipe_ctx *pipes_affected[MAX_PIPES];
890 	int num_pipes_affected = 0;
891 
892 	dc_exit_ips_for_hw_access(dc);
893 
894 	for (i = 0; i < num_streams; i++) {
895 		struct dc_stream_state *stream = streams[i];
896 
897 		for (j = 0; j < MAX_PIPES; j++) {
898 			if (dc->current_state->res_ctx.pipe_ctx[j].stream
899 					== stream) {
900 				pipes_affected[num_pipes_affected++] =
901 						&dc->current_state->res_ctx.pipe_ctx[j];
902 			}
903 		}
904 	}
905 
906 	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
907 }
908 
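/* Tear down everything created by dc_construct()/dc_construct_ctx(): current
 * state, links, link encoders, clock manager, resource pool, link service,
 * GPIO and BIOS services, logger, context and the bandwidth/VM helper
 * allocations.
 */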
909 static void dc_destruct(struct dc *dc)
910 {
911 	// reset link encoder assignment table on destruct
912 	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
913 			!dc->config.unify_link_enc_assignment)
914 		link_enc_cfg_init(dc, dc->current_state);
915 
916 	if (dc->current_state) {
917 		dc_state_release(dc->current_state);
918 		dc->current_state = NULL;
919 	}
920 
921 	destroy_links(dc);
922 
923 	destroy_link_encoders(dc);
924 
925 	if (dc->clk_mgr) {
926 		dc_destroy_clk_mgr(dc->clk_mgr);
927 		dc->clk_mgr = NULL;
928 	}
929 
930 	dc_destroy_resource_pool(dc);
931 
932 	if (dc->link_srv)
933 		link_destroy_link_service(&dc->link_srv);
934 
935 	if (dc->ctx->gpio_service)
936 		dal_gpio_service_destroy(&dc->ctx->gpio_service);
937 
938 	if (dc->ctx->created_bios)
939 		dal_bios_parser_destroy(&dc->ctx->dc_bios);
940 
941 	kfree(dc->ctx->logger);
942 	dc_perf_trace_destroy(&dc->ctx->perf_trace);
943 
944 	kfree(dc->ctx);
945 	dc->ctx = NULL;
946 
947 	kfree(dc->bw_vbios);
948 	dc->bw_vbios = NULL;
949 
950 	kfree(dc->bw_dceip);
951 	dc->bw_dceip = NULL;
952 
953 	kfree(dc->dcn_soc);
954 	dc->dcn_soc = NULL;
955 
956 	kfree(dc->dcn_ip);
957 	dc->dcn_ip = NULL;
958 
959 	kfree(dc->vm_helper);
960 	dc->vm_helper = NULL;
961 
962 }
963 
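/* Allocate the dc_context, create its logger and perf trace, resolve the DCE
 * version from the ASIC id, and create the link service.
 */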
964 static bool dc_construct_ctx(struct dc *dc,
965 		const struct dc_init_data *init_params)
966 {
967 	struct dc_context *dc_ctx;
968 
969 	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
970 	if (!dc_ctx)
971 		return false;
972 
973 	dc_ctx->cgs_device = init_params->cgs_device;
974 	dc_ctx->driver_context = init_params->driver;
975 	dc_ctx->dc = dc;
976 	dc_ctx->asic_id = init_params->asic_id;
977 	dc_ctx->dc_sink_id_count = 0;
978 	dc_ctx->dc_stream_id_count = 0;
979 	dc_ctx->dce_environment = init_params->dce_environment;
980 	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
981 	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
982 	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;
983 
984 	/* Create logger */
985 	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);
986 
987 	if (!dc_ctx->logger) {
988 		kfree(dc_ctx);
989 		return false;
990 	}
991 
992 	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
993 	dc->dml.logger = dc_ctx->logger;
994 
995 	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
996 
997 	dc_ctx->perf_trace = dc_perf_trace_create();
998 	if (!dc_ctx->perf_trace) {
999 		kfree(dc_ctx);
1000 		ASSERT_CRITICAL(false);
1001 		return false;
1002 	}
1003 
1004 	dc->ctx = dc_ctx;
1005 
1006 	dc->link_srv = link_create_link_service();
1007 	if (!dc->link_srv)
1008 		return false;
1009 
1010 	return true;
1011 }
1012 
1013 static bool dc_construct(struct dc *dc,
1014 		const struct dc_init_data *init_params)
1015 {
1016 	struct dc_context *dc_ctx;
1017 	struct bw_calcs_dceip *dc_dceip;
1018 	struct bw_calcs_vbios *dc_vbios;
1019 	struct dcn_soc_bounding_box *dcn_soc;
1020 	struct dcn_ip_params *dcn_ip;
1021 
1022 	dc->config = init_params->flags;
1023 
1024 	// Allocate memory for the vm_helper
1025 	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
1026 	if (!dc->vm_helper) {
1027 		dm_error("%s: failed to create dc->vm_helper\n", __func__);
1028 		goto fail;
1029 	}
1030 
1031 	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
1032 
1033 	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
1034 	if (!dc_dceip) {
1035 		dm_error("%s: failed to create dceip\n", __func__);
1036 		goto fail;
1037 	}
1038 
1039 	dc->bw_dceip = dc_dceip;
1040 
1041 	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
1042 	if (!dc_vbios) {
1043 		dm_error("%s: failed to create vbios\n", __func__);
1044 		goto fail;
1045 	}
1046 
1047 	dc->bw_vbios = dc_vbios;
1048 	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
1049 	if (!dcn_soc) {
1050 		dm_error("%s: failed to create dcn_soc\n", __func__);
1051 		goto fail;
1052 	}
1053 
1054 	dc->dcn_soc = dcn_soc;
1055 
1056 	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
1057 	if (!dcn_ip) {
1058 		dm_error("%s: failed to create dcn_ip\n", __func__);
1059 		goto fail;
1060 	}
1061 
1062 	dc->dcn_ip = dcn_ip;
1063 
1064 	if (init_params->bb_from_dmub)
1065 		dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub;
1066 	else
1067 		dc->dml2_options.bb_from_dmub = NULL;
1068 
1069 	if (!dc_construct_ctx(dc, init_params)) {
1070 		dm_error("%s: failed to create ctx\n", __func__);
1071 		goto fail;
1072 	}
1073 
1074 	dc_ctx = dc->ctx;
1075 
1076 	/* Resource should construct all asic specific resources.
1077 	 * This should be the only place where we need to parse the asic id
1078 	 */
1079 	if (init_params->vbios_override)
1080 		dc_ctx->dc_bios = init_params->vbios_override;
1081 	else {
1082 		/* Create BIOS parser */
1083 		struct bp_init_data bp_init_data;
1084 
1085 		bp_init_data.ctx = dc_ctx;
1086 		bp_init_data.bios = init_params->asic_id.atombios_base_address;
1087 
1088 		dc_ctx->dc_bios = dal_bios_parser_create(
1089 				&bp_init_data, dc_ctx->dce_version);
1090 
1091 		if (!dc_ctx->dc_bios) {
1092 			ASSERT_CRITICAL(false);
1093 			goto fail;
1094 		}
1095 
1096 		dc_ctx->created_bios = true;
1097 	}
1098 
1099 	dc->vendor_signature = init_params->vendor_signature;
1100 
1101 	/* Create GPIO service */
1102 	dc_ctx->gpio_service = dal_gpio_service_create(
1103 			dc_ctx->dce_version,
1104 			dc_ctx->dce_environment,
1105 			dc_ctx);
1106 
1107 	if (!dc_ctx->gpio_service) {
1108 		ASSERT_CRITICAL(false);
1109 		goto fail;
1110 	}
1111 
1112 	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
1113 	if (!dc->res_pool)
1114 		goto fail;
1115 
1116 	/* set i2c speed if not done by the respective dcnxxx__resource.c */
1117 	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
1118 		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
1119 	if (dc->caps.max_optimizable_video_width == 0)
1120 		dc->caps.max_optimizable_video_width = 5120;
1121 	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
1122 	if (!dc->clk_mgr)
1123 		goto fail;
1124 #ifdef CONFIG_DRM_AMD_DC_FP
1125 	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
1126 
1127 	if (dc->res_pool->funcs->update_bw_bounding_box) {
1128 		DC_FP_START();
1129 		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
1130 		DC_FP_END();
1131 	}
1132 #endif
1133 
1134 	if (!create_links(dc, init_params->num_virtual_links))
1135 		goto fail;
1136 
1137 	/* Create additional DIG link encoder objects if fewer than the platform
1138 	 * supports were created during link construction.
1139 	 */
1140 	if (!create_link_encoders(dc))
1141 		goto fail;
1142 
1143 	/* Creation of current_state must occur after dc->dml
1144 	 * is initialized in dc_create_resource_pool because
1145 	 * on creation it copies the contents of dc->dml
1146 	 */
1147 	dc->current_state = dc_state_create(dc, NULL);
1148 
1149 	if (!dc->current_state) {
1150 		dm_error("%s: failed to create validate ctx\n", __func__);
1151 		goto fail;
1152 	}
1153 
1154 	return true;
1155 
1156 fail:
1157 	return false;
1158 }
1159 
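/* Mark every writeback instance of @stream as disabled. */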
1160 static void disable_all_writeback_pipes_for_stream(
1161 		const struct dc *dc,
1162 		struct dc_stream_state *stream,
1163 		struct dc_state *context)
1164 {
1165 	int i;
1166 
1167 	for (i = 0; i < stream->num_wb_info; i++)
1168 		stream->writeback_info[i].wb_enabled = false;
1169 }
1170 
1171 static void apply_ctx_interdependent_lock(struct dc *dc,
1172 					  struct dc_state *context,
1173 					  struct dc_stream_state *stream,
1174 					  bool lock)
1175 {
1176 	int i;
1177 
1178 	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
1179 	if (dc->hwss.interdependent_update_lock)
1180 		dc->hwss.interdependent_update_lock(dc, context, lock);
1181 	else {
1182 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1183 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1184 			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1185 
1186 			// Copied conditions that were previously in dce110_apply_ctx_for_surface
1187 			if (stream == pipe_ctx->stream) {
1188 				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
1189 					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1190 					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1191 			}
1192 		}
1193 	}
1194 }
1195 
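/* Select the visual confirm color for @pipe_ctx based on the active
 * debug.visual_confirm mode (HDR, surface, swizzle, cursor, DCC, MPC tree,
 * SubVP, MCLK switch, FAMS2, VABC).
 */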
1196 static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
1197 {
1198 	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
1199 		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
1200 
1201 		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
1202 			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1203 		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
1204 			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1205 		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
1206 			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1207 		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
1208 			get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1209 		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
1210 			get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
1211 		else {
1212 			if (dc->ctx->dce_version < DCN_VERSION_2_0)
1213 				color_space_to_black_color(
1214 					dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
1215 		}
1216 		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
1217 			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
1218 				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1219 			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
1220 				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1221 			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
1222 				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1223 			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
1224 				get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
1225 			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
1226 				get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1227 		}
1228 	}
1229 }
1230 
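/* Disable planes that are enabled in the current state but have no matching
 * stream or plane in the new context. Programming is done on a copy of the
 * current state, which then replaces dc->current_state.
 */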
1231 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1232 {
1233 	int i, j;
1234 	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
1235 	struct dc_state *current_ctx;
1236 	struct pipe_ctx *pipe;
1237 	struct timing_generator *tg;
1238 
1239 	if (dangling_context == NULL)
1240 		return;
1241 
1242 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1243 		struct dc_stream_state *old_stream =
1244 				dc->current_state->res_ctx.pipe_ctx[i].stream;
1245 		bool should_disable = true;
1246 		bool pipe_split_change = false;
1247 
1248 		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
1249 			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
1250 			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
1251 				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
1252 		else
1253 			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
1254 				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1255 
1256 		for (j = 0; j < context->stream_count; j++) {
1257 			if (old_stream == context->streams[j]) {
1258 				should_disable = false;
1259 				break;
1260 			}
1261 		}
1262 		if (!should_disable && pipe_split_change &&
1263 				dc->current_state->stream_count != context->stream_count)
1264 			should_disable = true;
1265 
1266 		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
1267 				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
1268 			struct pipe_ctx *old_pipe, *new_pipe;
1269 
1270 			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1271 			new_pipe = &context->res_ctx.pipe_ctx[i];
1272 
1273 			if (old_pipe->plane_state && !new_pipe->plane_state)
1274 				should_disable = true;
1275 		}
1276 
1277 		if (should_disable && old_stream) {
1278 			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
1279 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1280 			tg = pipe->stream_res.tg;
1281 			/* When disabling plane for a phantom pipe, we must turn on the
1282 			 * phantom OTG so the disable programming gets the double buffer
1283 			 * update. Otherwise the pipe will be left in a partially disabled
1284 			 * state that can result in underflow or hang when enabling it
1285 			 * again for different use.
1286 			 */
1287 			if (is_phantom) {
1288 				if (tg->funcs->enable_crtc) {
1289 					if (dc->hwseq->funcs.blank_pixel_data)
1290 						dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
1291 					tg->funcs->enable_crtc(tg);
1292 				}
1293 			}
1294 
1295 			if (is_phantom)
1296 				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
1297 			else
1298 				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1299 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1300 
1301 			if (pipe->stream && pipe->plane_state) {
1302 				if (!dc->debug.using_dml2)
1303 					set_p_state_switch_method(dc, context, pipe);
1304 				dc_update_visual_confirm_color(dc, context, pipe);
1305 			}
1306 
1307 			if (dc->hwss.apply_ctx_for_surface) {
1308 				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1309 				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1310 				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1311 				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1312 			}
1313 
1314 			if (dc->res_pool->funcs->prepare_mcache_programming)
1315 				dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
1316 			if (dc->hwss.program_front_end_for_ctx) {
1317 				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1318 				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1319 				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1320 				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1321 			}
1322 			/* We need to put the phantom OTG back into its default (disabled) state or we
1323 			 * can get corruption when transitioning from one SubVP config to a different one.
1324 			 * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
1325 			 * will still get its double buffer update.
1326 			 */
1327 			if (is_phantom) {
1328 				if (tg->funcs->disable_phantom_crtc)
1329 					tg->funcs->disable_phantom_crtc(tg);
1330 			}
1331 		}
1332 	}
1333 
1334 	current_ctx = dc->current_state;
1335 	dc->current_state = dangling_context;
1336 	dc_state_release(current_ctx);
1337 }
1338 
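/* If VBIOS/GOP left an eDP stream enabled with a pixel clock that does not
 * match what the new context requests, turn that stream off so it can be
 * re-enabled with the correct timing.
 */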
1339 static void disable_vbios_mode_if_required(
1340 		struct dc *dc,
1341 		struct dc_state *context)
1342 {
1343 	unsigned int i, j;
1344 
1345 	/* if the timing has changed, disable the stream */
1346 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1347 		struct dc_stream_state *stream = NULL;
1348 		struct dc_link *link = NULL;
1349 		struct pipe_ctx *pipe = NULL;
1350 
1351 		pipe = &context->res_ctx.pipe_ctx[i];
1352 		stream = pipe->stream;
1353 		if (stream == NULL)
1354 			continue;
1355 
1356 		if (stream->apply_seamless_boot_optimization)
1357 			continue;
1358 
1359 		// only looking for first odm pipe
1360 		if (pipe->prev_odm_pipe)
1361 			continue;
1362 
1363 		if (stream->link->local_sink &&
1364 			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1365 			link = stream->link;
1366 		}
1367 
1368 		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1369 			unsigned int enc_inst, tg_inst = 0;
1370 			unsigned int pix_clk_100hz = 0;
1371 
1372 			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1373 			if (enc_inst != ENGINE_ID_UNKNOWN) {
1374 				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1375 					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1376 						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1377 							dc->res_pool->stream_enc[j]);
1378 						break;
1379 					}
1380 				}
1381 
1382 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1383 					dc->res_pool->dp_clock_source,
1384 					tg_inst, &pix_clk_100hz);
1385 
1386 				if (link->link_status.link_active) {
1387 					uint32_t requested_pix_clk_100hz =
1388 						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1389 
1390 					if (pix_clk_100hz != requested_pix_clk_100hz) {
1391 						dc->link_srv->set_dpms_off(pipe);
1392 						pipe->stream->dpms_off = false;
1393 					}
1394 				}
1395 			}
1396 		}
1397 	}
1398 }
1399 
1400 /* Public functions */
1401 
1402 struct dc *dc_create(const struct dc_init_data *init_params)
1403 {
1404 	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1405 	unsigned int full_pipe_count;
1406 
1407 	if (!dc)
1408 		return NULL;
1409 
1410 	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1411 		dc->caps.linear_pitch_alignment = 64;
1412 		if (!dc_construct_ctx(dc, init_params))
1413 			goto destruct_dc;
1414 	} else {
1415 		if (!dc_construct(dc, init_params))
1416 			goto destruct_dc;
1417 
1418 		full_pipe_count = dc->res_pool->pipe_count;
1419 		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1420 			full_pipe_count--;
1421 		dc->caps.max_streams = min(
1422 				full_pipe_count,
1423 				dc->res_pool->stream_enc_count);
1424 
1425 		dc->caps.max_links = dc->link_count;
1426 		dc->caps.max_audios = dc->res_pool->audio_count;
1427 		dc->caps.linear_pitch_alignment = 64;
1428 
1429 		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1430 
1431 		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1432 
1433 		if (dc->res_pool->dmcu != NULL)
1434 			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1435 	}
1436 
1437 	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1438 	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1439 	dc->clk_reg_offsets = init_params->clk_reg_offsets;
1440 
1441 	/* Populate versioning information */
1442 	dc->versions.dc_ver = DC_VER;
1443 
1444 	dc->build_id = DC_BUILD_ID;
1445 
1446 	DC_LOG_DC("Display Core initialized\n");
1447 
1448 	return dc;
1449 
1450 destruct_dc:
1451 	dc_destruct(dc);
1452 	kfree(dc);
1453 	return NULL;
1454 }
1455 
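/* Record, for every eDP link, whether a panel is actually present (forced to
 * "not present" when config.edp_not_connected is set).
 */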
1456 static void detect_edp_presence(struct dc *dc)
1457 {
1458 	struct dc_link *edp_links[MAX_NUM_EDP];
1459 	struct dc_link *edp_link = NULL;
1460 	enum dc_connection_type type;
1461 	int i;
1462 	int edp_num;
1463 
1464 	dc_get_edp_links(dc, edp_links, &edp_num);
1465 	if (!edp_num)
1466 		return;
1467 
1468 	for (i = 0; i < edp_num; i++) {
1469 		edp_link = edp_links[i];
1470 		if (dc->config.edp_not_connected) {
1471 			edp_link->edp_sink_present = false;
1472 		} else {
1473 			dc_link_detect_connection_type(edp_link, &type);
1474 			edp_link->edp_sink_present = (type != dc_connection_none);
1475 		}
1476 	}
1477 }
1478 
1479 void dc_hardware_init(struct dc *dc)
1480 {
1481 
1482 	detect_edp_presence(dc);
1483 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1484 		dc->hwss.init_hw(dc);
1485 	dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
1486 }
1487 
1488 void dc_init_callbacks(struct dc *dc,
1489 		const struct dc_callback_init *init_params)
1490 {
1491 	dc->ctx->cp_psp = init_params->cp_psp;
1492 }
1493 
1494 void dc_deinit_callbacks(struct dc *dc)
1495 {
1496 	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1497 }
1498 
1499 void dc_destroy(struct dc **dc)
1500 {
1501 	dc_destruct(*dc);
1502 	kfree(*dc);
1503 	*dc = NULL;
1504 }
1505 
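/* Collect the pipes that participate in triggered CRTC reset (excluding the
 * event source itself) and enable per-frame CRTC position reset on them.
 */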
1506 static void enable_timing_multisync(
1507 		struct dc *dc,
1508 		struct dc_state *ctx)
1509 {
1510 	int i, multisync_count = 0;
1511 	int pipe_count = dc->res_pool->pipe_count;
1512 	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1513 
1514 	for (i = 0; i < pipe_count; i++) {
1515 		if (!ctx->res_ctx.pipe_ctx[i].stream ||
1516 				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1517 			continue;
1518 		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1519 			continue;
1520 		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1521 		multisync_count++;
1522 	}
1523 
1524 	if (multisync_count > 0) {
1525 		dc->hwss.enable_per_frame_crtc_position_reset(
1526 			dc, multisync_count, multisync_pipes);
1527 	}
1528 }
1529 
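/* Group OTG master pipes whose stream timings can be synchronized (fully or by
 * vblank), choose an unblanked pipe as each group's master, and enable timing
 * or vblank synchronization for every group larger than one pipe.
 */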
1530 static void program_timing_sync(
1531 		struct dc *dc,
1532 		struct dc_state *ctx)
1533 {
1534 	int i, j, k;
1535 	int group_index = 0;
1536 	int num_group = 0;
1537 	int pipe_count = dc->res_pool->pipe_count;
1538 	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1539 
1540 	for (i = 0; i < pipe_count; i++) {
1541 		if (!ctx->res_ctx.pipe_ctx[i].stream
1542 				|| ctx->res_ctx.pipe_ctx[i].top_pipe
1543 				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1544 			continue;
1545 
1546 		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1547 	}
1548 
1549 	for (i = 0; i < pipe_count; i++) {
1550 		int group_size = 1;
1551 		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1552 		struct pipe_ctx *pipe_set[MAX_PIPES];
1553 
1554 		if (!unsynced_pipes[i])
1555 			continue;
1556 
1557 		pipe_set[0] = unsynced_pipes[i];
1558 		unsynced_pipes[i] = NULL;
1559 
1560 		/* Add tg to the set, search rest of the tg's for ones with
1561 		 * same timing, add all tgs with same timing to the group
1562 		 */
1563 		for (j = i + 1; j < pipe_count; j++) {
1564 			if (!unsynced_pipes[j])
1565 				continue;
1566 			if (sync_type != TIMING_SYNCHRONIZABLE &&
1567 				dc->hwss.enable_vblanks_synchronization &&
1568 				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1569 				resource_are_vblanks_synchronizable(
1570 					unsynced_pipes[j]->stream,
1571 					pipe_set[0]->stream)) {
1572 				sync_type = VBLANK_SYNCHRONIZABLE;
1573 				pipe_set[group_size] = unsynced_pipes[j];
1574 				unsynced_pipes[j] = NULL;
1575 				group_size++;
1576 			} else
1577 			if (sync_type != VBLANK_SYNCHRONIZABLE &&
1578 				resource_are_streams_timing_synchronizable(
1579 					unsynced_pipes[j]->stream,
1580 					pipe_set[0]->stream)) {
1581 				sync_type = TIMING_SYNCHRONIZABLE;
1582 				pipe_set[group_size] = unsynced_pipes[j];
1583 				unsynced_pipes[j] = NULL;
1584 				group_size++;
1585 			}
1586 		}
1587 
1588 		/* set first unblanked pipe as master */
1589 		for (j = 0; j < group_size; j++) {
1590 			bool is_blanked;
1591 
1592 			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1593 				is_blanked =
1594 					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1595 			else
1596 				is_blanked =
1597 					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1598 			if (!is_blanked) {
1599 				if (j == 0)
1600 					break;
1601 
1602 				swap(pipe_set[0], pipe_set[j]);
1603 				break;
1604 			}
1605 		}
1606 
1607 		for (k = 0; k < group_size; k++) {
1608 			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
1609 
1610 			if (!status)
1611 				continue;
1612 
1613 			status->timing_sync_info.group_id = num_group;
1614 			status->timing_sync_info.group_size = group_size;
1615 			if (k == 0)
1616 				status->timing_sync_info.master = true;
1617 			else
1618 				status->timing_sync_info.master = false;
1619 
1620 		}
1621 
1622 		/* remove any other unblanked pipes as they have already been synced */
1623 		if (dc->config.use_pipe_ctx_sync_logic) {
1624 			/* check pipe's syncd to decide which pipe to be removed */
1625 			for (j = 1; j < group_size; j++) {
1626 				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1627 					group_size--;
1628 					pipe_set[j] = pipe_set[group_size];
1629 					j--;
1630 				} else
1631 					/* link slave pipe's syncd with master pipe */
1632 					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1633 			}
1634 		} else {
1635 			/* remove any other pipes by checking valid plane */
1636 			for (j = j + 1; j < group_size; j++) {
1637 				bool is_blanked;
1638 
1639 				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1640 					is_blanked =
1641 						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1642 				else
1643 					is_blanked =
1644 						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1645 				if (!is_blanked) {
1646 					group_size--;
1647 					pipe_set[j] = pipe_set[group_size];
1648 					j--;
1649 				}
1650 			}
1651 		}
1652 
1653 		if (group_size > 1) {
1654 			if (sync_type == TIMING_SYNCHRONIZABLE) {
1655 				dc->hwss.enable_timing_synchronization(
1656 					dc, ctx, group_index, group_size, pipe_set);
1657 			} else
1658 				if (sync_type == VBLANK_SYNCHRONIZABLE) {
1659 				dc->hwss.enable_vblanks_synchronization(
1660 					dc, group_index, group_size, pipe_set);
1661 				}
1662 			group_index++;
1663 		}
1664 		num_group++;
1665 	}
1666 }
1667 
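/* Returns true when the requested stream set differs from the committed state:
 * a different stream count, a different stream pointer at any index, or a
 * stream whose link state is no longer valid.
 */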
1668 static bool streams_changed(struct dc *dc,
1669 			    struct dc_stream_state *streams[],
1670 			    uint8_t stream_count)
1671 {
1672 	uint8_t i;
1673 
1674 	if (stream_count != dc->current_state->stream_count)
1675 		return true;
1676 
1677 	for (i = 0; i < dc->current_state->stream_count; i++) {
1678 		if (dc->current_state->streams[i] != streams[i])
1679 			return true;
1680 		if (!streams[i]->link->link_state_valid)
1681 			return true;
1682 	}
1683 
1684 	return false;
1685 }
1686 
1687 bool dc_validate_boot_timing(const struct dc *dc,
1688 				const struct dc_sink *sink,
1689 				struct dc_crtc_timing *crtc_timing)
1690 {
1691 	struct timing_generator *tg;
1692 	struct stream_encoder *se = NULL;
1693 
1694 	struct dc_crtc_timing hw_crtc_timing = {0};
1695 
1696 	struct dc_link *link = sink->link;
1697 	unsigned int i, enc_inst, tg_inst = 0;
1698 
1699 	/* Support seamless boot on EDP displays only */
1700 	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1701 		return false;
1702 	}
1703 
1704 	if (dc->debug.force_odm_combine) {
1705 		DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n");
1706 		return false;
1707 	}
1708 
1709 	/* Check for enabled DIG to identify enabled display */
1710 	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1711 		DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n");
1712 		return false;
1713 	}
1714 
1715 	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1716 
1717 	if (enc_inst == ENGINE_ID_UNKNOWN) {
1718 		DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n");
1719 		return false;
1720 	}
1721 
1722 	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1723 		if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1724 
1725 			se = dc->res_pool->stream_enc[i];
1726 
1727 			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1728 				dc->res_pool->stream_enc[i]);
1729 			break;
1730 		}
1731 	}
1732 
1733 	// tg_inst not found
1734 	if (i == dc->res_pool->stream_enc_count) {
1735 		DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n");
1736 		return false;
1737 	}
1738 
1739 	if (tg_inst >= dc->res_pool->timing_generator_count) {
1740 		DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n");
1741 		return false;
1742 	}
1743 
1744 	if (tg_inst != link->link_enc->preferred_engine) {
1745 		DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n");
1746 		return false;
1747 	}
1748 
1749 	tg = dc->res_pool->timing_generators[tg_inst];
1750 
1751 	if (!tg->funcs->get_hw_timing) {
1752 		DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n");
1753 		return false;
1754 	}
1755 
1756 	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) {
1757 		DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n");
1758 		return false;
1759 	}
1760 
1761 	if (crtc_timing->h_total != hw_crtc_timing.h_total) {
1762 		DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n");
1763 		return false;
1764 	}
1765 
1766 	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) {
1767 		DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n");
1768 		return false;
1769 	}
1770 
1771 	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) {
1772 		DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n");
1773 		return false;
1774 	}
1775 
1776 	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) {
1777 		DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n");
1778 		return false;
1779 	}
1780 
1781 	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) {
1782 		DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n");
1783 		return false;
1784 	}
1785 
1786 	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) {
1787 		DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n");
1788 		return false;
1789 	}
1790 
1791 	if (crtc_timing->v_total != hw_crtc_timing.v_total) {
1792 		DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n");
1793 		return false;
1794 	}
1795 
1796 	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) {
1797 		DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n");
1798 		return false;
1799 	}
1800 
1801 	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) {
1802 		DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n");
1803 		return false;
1804 	}
1805 
1806 	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) {
1807 		DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n");
1808 		return false;
1809 	}
1810 
1811 	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) {
1812 		DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n");
1813 		return false;
1814 	}
1815 
1816 	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) {
1817 		DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n");
1818 		return false;
1819 	}
1820 
1821 	/* block DSC for now, as VBIOS does not currently support DSC timings */
1822 	if (crtc_timing->flags.DSC) {
1823 		DC_LOG_DEBUG("boot timing validation failed due to DSC\n");
1824 		return false;
1825 	}
1826 
1827 	if (dc_is_dp_signal(link->connector_signal)) {
1828 		unsigned int pix_clk_100hz = 0;
1829 		uint32_t numOdmPipes = 1;
1830 		uint32_t id_src[4] = {0};
1831 
1832 		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1833 			dc->res_pool->dp_clock_source,
1834 			tg_inst, &pix_clk_100hz);
1835 
1836 		if (tg->funcs->get_optc_source)
1837 			tg->funcs->get_optc_source(tg,
1838 						&numOdmPipes, &id_src[0], &id_src[1]);
1839 
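		/* Scale the HW-reported pixel clock by the ODM pipe count (or by the
		 * stream encoder's pixels per cycle) so it can be compared against the
		 * full stream pixel clock below.
		 */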
1840 		if (numOdmPipes == 2) {
1841 			pix_clk_100hz *= 2;
1842 		} else if (numOdmPipes == 4) {
1843 			pix_clk_100hz *= 4;
1844 		} else if (se && se->funcs->get_pixels_per_cycle) {
1845 			uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);
1846 
1847 			if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) {
1848 				DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n");
1849 				return false;
1850 			}
1851 
1852 			pix_clk_100hz *= pixels_per_cycle;
1853 		}
1854 
1855 		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
1856 		// slightly due to rounding issues in 10 kHz units.
1857 		if (crtc_timing->pix_clk_100hz != pix_clk_100hz) {
1858 			DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n");
1859 			return false;
1860 		}
1861 
1862 		if (!se || !se->funcs->dp_get_pixel_format) {
1863 			DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n");
1864 			return false;
1865 		}
1866 
1867 		if (!se->funcs->dp_get_pixel_format(
1868 			se,
1869 			&hw_crtc_timing.pixel_encoding,
1870 			&hw_crtc_timing.display_color_depth)) {
1871 			DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n");
1872 			return false;
1873 		}
1874 
1875 		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) {
1876 			DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n");
1877 			return false;
1878 		}
1879 
1880 		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) {
1881 			DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n");
1882 			return false;
1883 		}
1884 	}
1885 
1886 
1887 	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1888 		DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n");
1889 		return false;
1890 	}
1891 
1892 	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
1893 		DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n");
1894 		return false;
1895 	}
1896 
1897 	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1898 		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1899 		return false;
1900 	}
1901 
1902 	return true;
1903 }
1904 
1905 static inline bool should_update_pipe_for_stream(
1906 		struct dc_state *context,
1907 		struct pipe_ctx *pipe_ctx,
1908 		struct dc_stream_state *stream)
1909 {
1910 	return (pipe_ctx->stream && pipe_ctx->stream == stream);
1911 }
1912 
1913 static inline bool should_update_pipe_for_plane(
1914 		struct dc_state *context,
1915 		struct pipe_ctx *pipe_ctx,
1916 		struct dc_plane_state *plane_state)
1917 {
1918 	return (pipe_ctx->plane_state == plane_state);
1919 }
1920 
1921 void dc_enable_stereo(
1922 	struct dc *dc,
1923 	struct dc_state *context,
1924 	struct dc_stream_state *streams[],
1925 	uint8_t stream_count)
1926 {
1927 	int i, j;
1928 	struct pipe_ctx *pipe;
1929 
1930 	dc_exit_ips_for_hw_access(dc);
1931 
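	/* Fall back to the current state when no explicit context is provided */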
1932 	for (i = 0; i < MAX_PIPES; i++) {
1933 		if (context != NULL) {
1934 			pipe = &context->res_ctx.pipe_ctx[i];
1935 		} else {
1936 			context = dc->current_state;
1937 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1938 		}
1939 
1940 		for (j = 0; pipe && j < stream_count; j++)  {
1941 			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1942 				dc->hwss.setup_stereo)
1943 				dc->hwss.setup_stereo(pipe, dc);
1944 		}
1945 	}
1946 }
1947 
1948 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1949 {
1950 	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1951 		dc_exit_ips_for_hw_access(dc);
1952 
1953 		enable_timing_multisync(dc, context);
1954 		program_timing_sync(dc, context);
1955 	}
1956 }
1957 
1958 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1959 {
1960 	int i;
1961 	unsigned int stream_mask = 0;
1962 
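	/* One bit per pipe that is currently driving a stream */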
1963 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1964 		if (context->res_ctx.pipe_ctx[i].stream)
1965 			stream_mask |= 1 << i;
1966 	}
1967 
1968 	return stream_mask;
1969 }
1970 
1971 void dc_z10_restore(const struct dc *dc)
1972 {
1973 	if (dc->hwss.z10_restore)
1974 		dc->hwss.z10_restore(dc);
1975 }
1976 
1977 void dc_z10_save_init(struct dc *dc)
1978 {
1979 	if (dc->hwss.z10_save_init)
1980 		dc->hwss.z10_save_init(dc);
1981 }
1982 
1983 /* Sets a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory.
1984  * This prevents over-allocation of DET during the unlock process,
1985  * e.g. 2 pipe config with different streams with a max of 20 DET segments
1986  *	Before:								After:
1987  *		- Pipe0: 10 DET segments			- Pipe0: 12 DET segments
1988  *		- Pipe1: 10 DET segments			- Pipe1: 8 DET segments
1989  * If Pipe0 gets updated first, 22 DET segments will be allocated
1990  */
1991 static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context)
1992 {
1993 	unsigned int i = 0;
1994 	struct pipe_ctx *pipe = NULL;
1995 	struct timing_generator *tg = NULL;
1996 
1997 	if (!dc->config.set_pipe_unlock_order)
1998 		return;
1999 
2000 	memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first));
2001 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2002 		pipe = &context->res_ctx.pipe_ctx[i];
2003 		tg = pipe->stream_res.tg;
2004 
2005 		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
2006 				!tg->funcs->is_tg_enabled(tg) ||
2007 				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
2008 			continue;
2009 		}
2010 
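		/* Unlock first any pipe whose DET allocation shrinks in the new context */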
2011 		if (resource_calculate_det_for_stream(context, pipe) <
2012 				resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) {
2013 			dc->scratch.pipes_to_unlock_first[i] = true;
2014 		}
2015 	}
2016 }
2017 
2018 /**
2019  * dc_commit_state_no_check - Apply context to the hardware
2020  *
2021  * @dc: DC object with the current status to be updated
2022  * @context: New state that will become the current status at the end of this function
2023  *
2024  * Applies the given context to the hardware and copies it into the current context.
2025  * It's up to the user to release the src context afterwards.
2026  *
2027  * Return: an enum dc_status result code for the operation
2028  */
2029 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
2030 {
2031 	struct dc_bios *dcb = dc->ctx->dc_bios;
2032 	enum dc_status result = DC_ERROR_UNEXPECTED;
2033 	struct pipe_ctx *pipe;
2034 	int i, k, l;
2035 	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
2036 	struct dc_state *old_state;
2037 	bool subvp_prev_use = false;
2038 
2039 	dc_z10_restore(dc);
2040 	dc_allow_idle_optimizations(dc, false);
2041 
2042 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2043 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2044 
2045 		/* Check old context for SubVP */
2046 		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
2047 		if (subvp_prev_use)
2048 			break;
2049 	}
2050 
2051 	for (i = 0; i < context->stream_count; i++)
2052 		dc_streams[i] =  context->streams[i];
2053 
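	/* If VBIOS is still driving the displays, tear down its mode if required
	 * and switch the hardware into accelerated (driver-driven) mode.
	 */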
2054 	if (!dcb->funcs->is_accelerated_mode(dcb)) {
2055 		disable_vbios_mode_if_required(dc, context);
2056 		dc->hwss.enable_accelerated_mode(dc, context);
2057 	}
2058 
2059 	if (context->stream_count > get_seamless_boot_stream_count(context) ||
2060 		context->stream_count == 0)
2061 		dc->hwss.prepare_bandwidth(dc, context);
2062 
2063 	/* When SubVP is active, all HW programming must be done while
2064 	 * SubVP lock is acquired
2065 	 */
2066 	if (dc->hwss.subvp_pipe_control_lock)
2067 		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
2068 	if (dc->hwss.fams2_global_control_lock)
2069 		dc->hwss.fams2_global_control_lock(dc, context, true);
2070 
2071 	if (dc->hwss.update_dsc_pg)
2072 		dc->hwss.update_dsc_pg(dc, context, false);
2073 
2074 	disable_dangling_plane(dc, context);
2075 	/* re-program planes for the existing stream, in case we need to
2076 	 * free up plane resources for later use
2077 	 */
2078 	if (dc->hwss.apply_ctx_for_surface) {
2079 		for (i = 0; i < context->stream_count; i++) {
2080 			if (context->streams[i]->mode_changed)
2081 				continue;
2082 			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
2083 			dc->hwss.apply_ctx_for_surface(
2084 				dc, context->streams[i],
2085 				context->stream_status[i].plane_count,
2086 				context); /* use new pipe config in new context */
2087 			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
2088 			dc->hwss.post_unlock_program_front_end(dc, context);
2089 		}
2090 	}
2091 
2092 	/* Program hardware */
2093 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2094 		pipe = &context->res_ctx.pipe_ctx[i];
2095 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
2096 	}
2097 
2098 	result = dc->hwss.apply_ctx_to_hw(dc, context);
2099 
2100 	if (result != DC_OK) {
2101 		/* Application of dc_state to hardware stopped. */
2102 		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
2103 		return result;
2104 	}
2105 
2106 	dc_trigger_sync(dc, context);
2107 
2108 	/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
2109 	for (i = 0; i < context->stream_count; i++) {
2110 		uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
2111 
2112 		context->streams[i]->update_flags.raw = 0xFFFFFFFF;
2113 		context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
2114 	}
2115 
2116 	determine_pipe_unlock_order(dc, context);
2117 	/* Program all planes within new context*/
2118 	if (dc->res_pool->funcs->prepare_mcache_programming)
2119 		dc->res_pool->funcs->prepare_mcache_programming(dc, context);
2120 	if (dc->hwss.program_front_end_for_ctx) {
2121 		dc->hwss.interdependent_update_lock(dc, context, true);
2122 		dc->hwss.program_front_end_for_ctx(dc, context);
2123 		dc->hwss.interdependent_update_lock(dc, context, false);
2124 		dc->hwss.post_unlock_program_front_end(dc, context);
2125 	}
2126 
2127 	if (dc->hwss.commit_subvp_config)
2128 		dc->hwss.commit_subvp_config(dc, context);
2129 	if (dc->hwss.subvp_pipe_control_lock)
2130 		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
2131 	if (dc->hwss.fams2_global_control_lock)
2132 		dc->hwss.fams2_global_control_lock(dc, context, false);
2133 
2134 	for (i = 0; i < context->stream_count; i++) {
2135 		const struct dc_link *link = context->streams[i]->link;
2136 
2137 		if (!context->streams[i]->mode_changed)
2138 			continue;
2139 
2140 		if (dc->hwss.apply_ctx_for_surface) {
2141 			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
2142 			dc->hwss.apply_ctx_for_surface(
2143 					dc, context->streams[i],
2144 					context->stream_status[i].plane_count,
2145 					context);
2146 			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
2147 			dc->hwss.post_unlock_program_front_end(dc, context);
2148 		}
2149 
2150 		/*
2151 		 * enable stereo
2152 		 * TODO rework dc_enable_stereo call to work with validation sets?
2153 		 */
2154 		for (k = 0; k < MAX_PIPES; k++) {
2155 			pipe = &context->res_ctx.pipe_ctx[k];
2156 
2157 			for (l = 0 ; pipe && l < context->stream_count; l++)  {
2158 				if (context->streams[l] &&
2159 					context->streams[l] == pipe->stream &&
2160 					dc->hwss.setup_stereo)
2161 					dc->hwss.setup_stereo(pipe, dc);
2162 			}
2163 		}
2164 
2165 		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
2166 				context->streams[i]->timing.h_addressable,
2167 				context->streams[i]->timing.v_addressable,
2168 				context->streams[i]->timing.h_total,
2169 				context->streams[i]->timing.v_total,
2170 				context->streams[i]->timing.pix_clk_100hz / 10);
2171 	}
2172 
2173 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
2174 
2175 	if (get_seamless_boot_stream_count(context) == 0 ||
2176 		context->stream_count == 0) {
2177 		/* Must wait for no flips to be pending before doing optimize bw */
2178 		hwss_wait_for_no_pipes_pending(dc, context);
2179 		/*
2180 		 * optimized dispclk depends on ODM setup. Need to wait for ODM
2181 		 * update pending complete before optimizing bandwidth.
2182 		 */
2183 		hwss_wait_for_odm_update_pending_complete(dc, context);
2184 		/* pplib is notified if disp_num changed */
2185 		dc->hwss.optimize_bandwidth(dc, context);
2186 		/* Need to do otg sync again as otg could be out of sync due to otg
2187 		 * workaround applied during clock update
2188 		 */
2189 		dc_trigger_sync(dc, context);
2190 	}
2191 
2192 	if (dc->hwss.update_dsc_pg)
2193 		dc->hwss.update_dsc_pg(dc, context, true);
2194 
2195 	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2196 		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2197 	else
2198 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2199 
2200 	context->stream_mask = get_stream_mask(dc, context);
2201 
2202 	if (context->stream_mask != dc->current_state->stream_mask)
2203 		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
2204 
2205 	for (i = 0; i < context->stream_count; i++)
2206 		context->streams[i]->mode_changed = false;
2207 
2208 	/* Clear update flags that were set earlier to avoid redundant programming */
2209 	for (i = 0; i < context->stream_count; i++) {
2210 		context->streams[i]->update_flags.raw = 0x0;
2211 	}
2212 
2213 	old_state = dc->current_state;
2214 	dc->current_state = context;
2215 
2216 	dc_state_release(old_state);
2217 
2218 	dc_state_retain(dc->current_state);
2219 
2220 	return result;
2221 }
2222 
2223 static bool commit_minimal_transition_state(struct dc *dc,
2224 		struct dc_state *transition_base_context);
2225 
2226 /**
2227  * dc_commit_streams - Commit current stream state
2228  *
2229  * @dc: DC object with the commit state to be configured in the hardware
2230  * @params: Parameters for the commit, including the streams to be committed
2231  *
2232  * Function responsible for committing the stream changes to the hardware.
2233  *
2234  * Return:
2235  * DC_OK if everything works as expected; otherwise, a dc_status error
2236  * code.
2237  */
2238 enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
2239 {
2240 	int i, j;
2241 	struct dc_state *context;
2242 	enum dc_status res = DC_OK;
2243 	struct dc_validation_set set[MAX_STREAMS] = {0};
2244 	struct pipe_ctx *pipe;
2245 	bool handle_exit_odm2to1 = false;
2246 
2247 	if (!params)
2248 		return DC_ERROR_UNEXPECTED;
2249 
2250 	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2251 		return res;
2252 
2253 	if (!streams_changed(dc, params->streams, params->stream_count) &&
2254 			dc->current_state->power_source == params->power_source)
2255 		return res;
2256 
2257 	dc_exit_ips_for_hw_access(dc);
2258 
2259 	DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
2260 
2261 	for (i = 0; i < params->stream_count; i++) {
2262 		struct dc_stream_state *stream = params->streams[i];
2263 		struct dc_stream_status *status = dc_stream_get_status(stream);
2264 
2265 		/* revalidate streams */
2266 		res = dc_validate_stream(dc, stream);
2267 		if (res != DC_OK)
2268 			return res;
2269 
2270 		dc_stream_log(dc, stream);
2271 
2272 		set[i].stream = stream;
2273 
2274 		if (status) {
2275 			set[i].plane_count = status->plane_count;
2276 			for (j = 0; j < status->plane_count; j++)
2277 				set[i].plane_states[j] = status->plane_states[j];
2278 		}
2279 	}
2280 
2281 	/* ODM Combine 2:1 power optimization is only applied in the single-stream
2282 	 * scenario; it uses more pipes than strictly needed to reduce power consumption.
2283 	 * We need to switch off this feature to make room for new streams.
2284 	 */
2285 	if (params->stream_count > dc->current_state->stream_count &&
2286 			dc->current_state->stream_count == 1) {
2287 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2288 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2289 			if (pipe->next_odm_pipe)
2290 				handle_exit_odm2to1 = true;
2291 		}
2292 	}
2293 
2294 	if (handle_exit_odm2to1)
2295 		res = commit_minimal_transition_state(dc, dc->current_state);
2296 
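	/* Validate against a copy of the current state so that a failure leaves
	 * the committed state untouched.
	 */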
2297 	context = dc_state_create_current_copy(dc);
2298 	if (!context)
2299 		goto context_alloc_fail;
2300 
2301 	context->power_source = params->power_source;
2302 
2303 	res = dc_validate_with_context(dc, set, params->stream_count, context, false);
2304 
2305 	/*
2306 	 * Only update the link encoder to stream assignment after bandwidth validation has passed.
2307 	 */
2308 	if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment)
2309 		dc->res_pool->funcs->link_encs_assign(
2310 			dc, context, context->streams, context->stream_count);
2311 
2312 	if (res != DC_OK) {
2313 		BREAK_TO_DEBUGGER();
2314 		goto fail;
2315 	}
2316 
2317 	res = dc_commit_state_no_check(dc, context);
2318 
2319 	for (i = 0; i < params->stream_count; i++) {
2320 		for (j = 0; j < context->stream_count; j++) {
2321 			if (params->streams[i]->stream_id == context->streams[j]->stream_id)
2322 				params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2323 
2324 			if (dc_is_embedded_signal(params->streams[i]->signal)) {
2325 				struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
2326 
2327 				if (!status)
2328 					continue;
2329 
2330 				if (dc->hwss.is_abm_supported)
2331 					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
2332 				else
2333 					status->is_abm_supported = true;
2334 			}
2335 		}
2336 	}
2337 
2338 fail:
2339 	dc_state_release(context);
2340 
2341 context_alloc_fail:
2342 
2343 	DC_LOG_DC("%s Finished.\n", __func__);
2344 
2345 	return res;
2346 }
2347 
2348 bool dc_acquire_release_mpc_3dlut(
2349 		struct dc *dc, bool acquire,
2350 		struct dc_stream_state *stream,
2351 		struct dc_3dlut **lut,
2352 		struct dc_transfer_func **shaper)
2353 {
2354 	int pipe_idx;
2355 	bool ret = false;
2356 	bool found_pipe_idx = false;
2357 	const struct resource_pool *pool = dc->res_pool;
2358 	struct resource_context *res_ctx = &dc->current_state->res_ctx;
2359 	int mpcc_id = 0;
2360 
2361 	if (pool && res_ctx) {
2362 		if (acquire) {
2363 			/*find pipe idx for the given stream*/
2364 			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2365 				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2366 					found_pipe_idx = true;
2367 					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2368 					break;
2369 				}
2370 			}
2371 		} else
2372 			found_pipe_idx = true;/*for release pipe_idx is not required*/
2373 
2374 		if (found_pipe_idx) {
2375 			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2376 				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2377 			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2378 				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2379 		}
2380 	}
2381 	return ret;
2382 }
2383 
2384 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2385 {
2386 	int i;
2387 	struct pipe_ctx *pipe;
2388 
2389 	for (i = 0; i < MAX_PIPES; i++) {
2390 		pipe = &context->res_ctx.pipe_ctx[i];
2391 
2392 		// Don't check flip pending on phantom pipes
2393 		if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
2394 			continue;
2395 
2396 		/* Must be set to false to start with, due to the OR in the update function */
2397 		pipe->plane_state->status.is_flip_pending = false;
2398 		dc->hwss.update_pending_status(pipe);
2399 		if (pipe->plane_state->status.is_flip_pending)
2400 			return true;
2401 	}
2402 	return false;
2403 }
2404 
2405 /* Perform updates here which need to be deferred until the next vupdate.
2406  *
2407  * i.e. the blend LUT, 3D LUT, and shaper LUT bypass regs are double buffered,
2408  * but forcing LUT memory into the shutdown state takes effect immediately. This causes
2409  * single-frame corruption as the LUT gets disabled mid-frame unless shutdown
2410  * is deferred until after entering bypass.
2411  */
2412 static void process_deferred_updates(struct dc *dc)
2413 {
2414 	int i = 0;
2415 
2416 	if (dc->debug.enable_mem_low_power.bits.cm) {
2417 		ASSERT(dc->dcn_ip->max_num_dpp);
2418 		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2419 			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2420 				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2421 	}
2422 }
2423 
2424 void dc_post_update_surfaces_to_stream(struct dc *dc)
2425 {
2426 	int i;
2427 	struct dc_state *context = dc->current_state;
2428 
2429 	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2430 		return;
2431 
2432 	post_surface_trace(dc);
2433 
2434 	/*
2435 	 * Only relevant for DCN behavior where we can guarantee the optimization
2436 	 * is safe to apply - retain the legacy behavior for DCE.
2437 	 */
2438 
2439 	if (dc->ctx->dce_version < DCE_VERSION_MAX)
2440 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2441 	else {
2442 		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2443 
2444 		if (is_flip_pending_in_pipes(dc, context))
2445 			return;
2446 
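		/* Disable planes on pipes that no longer have a stream or plane assigned */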
2447 		for (i = 0; i < dc->res_pool->pipe_count; i++)
2448 			if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2449 					context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2450 				context->res_ctx.pipe_ctx[i].pipe_idx = i;
2451 				dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
2452 			}
2453 
2454 		process_deferred_updates(dc);
2455 
2456 		dc->hwss.optimize_bandwidth(dc, context);
2457 
2458 		if (dc->hwss.update_dsc_pg)
2459 			dc->hwss.update_dsc_pg(dc, context, true);
2460 	}
2461 
2462 	dc->optimized_required = false;
2463 	dc->wm_optimized_required = false;
2464 }
2465 
2466 bool dc_set_generic_gpio_for_stereo(bool enable,
2467 		struct gpio_service *gpio_service)
2468 {
2469 	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2470 	struct gpio_pin_info pin_info;
2471 	struct gpio *generic;
2472 	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2473 			   GFP_KERNEL);
2474 
2475 	if (!config)
2476 		return false;
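	/* Look up the generic GPIO pin that will be muxed to the stereo sync signal */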
2477 	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2478 
2479 	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2480 		kfree(config);
2481 		return false;
2482 	} else {
2483 		generic = dal_gpio_service_create_generic_mux(
2484 			gpio_service,
2485 			pin_info.offset,
2486 			pin_info.mask);
2487 	}
2488 
2489 	if (!generic) {
2490 		kfree(config);
2491 		return false;
2492 	}
2493 
2494 	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2495 
2496 	config->enable_output_from_mux = enable;
2497 	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2498 
2499 	if (gpio_result == GPIO_RESULT_OK)
2500 		gpio_result = dal_mux_setup_config(generic, config);
2501 
2502 	if (gpio_result == GPIO_RESULT_OK) {
2503 		dal_gpio_close(generic);
2504 		dal_gpio_destroy_generic_mux(&generic);
2505 		kfree(config);
2506 		return true;
2507 	} else {
2508 		dal_gpio_close(generic);
2509 		dal_gpio_destroy_generic_mux(&generic);
2510 		kfree(config);
2511 		return false;
2512 	}
2513 }
2514 
2515 static bool is_surface_in_context(
2516 		const struct dc_state *context,
2517 		const struct dc_plane_state *plane_state)
2518 {
2519 	int j;
2520 
2521 	for (j = 0; j < MAX_PIPES; j++) {
2522 		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2523 
2524 		if (plane_state == pipe_ctx->plane_state) {
2525 			return true;
2526 		}
2527 	}
2528 
2529 	return false;
2530 }
2531 
2532 static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
2533 {
2534 	union surface_update_flags *update_flags = &u->surface->update_flags;
2535 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
2536 
2537 	if (!u->plane_info)
2538 		return UPDATE_TYPE_FAST;
2539 
2540 	if (u->plane_info->color_space != u->surface->color_space) {
2541 		update_flags->bits.color_space_change = 1;
2542 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2543 	}
2544 
2545 	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2546 		update_flags->bits.horizontal_mirror_change = 1;
2547 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2548 	}
2549 
2550 	if (u->plane_info->rotation != u->surface->rotation) {
2551 		update_flags->bits.rotation_change = 1;
2552 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2553 	}
2554 
2555 	if (u->plane_info->format != u->surface->format) {
2556 		update_flags->bits.pixel_format_change = 1;
2557 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2558 	}
2559 
2560 	if (u->plane_info->stereo_format != u->surface->stereo_format) {
2561 		update_flags->bits.stereo_format_change = 1;
2562 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2563 	}
2564 
2565 	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2566 		update_flags->bits.per_pixel_alpha_change = 1;
2567 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2568 	}
2569 
2570 	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2571 		update_flags->bits.global_alpha_change = 1;
2572 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2573 	}
2574 
2575 	if (u->plane_info->dcc.enable != u->surface->dcc.enable
2576 			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2577 			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2578 		/* During DCC on/off, stutter period is calculated before
2579 		 * DCC has fully transitioned. This results in incorrect
2580 		 * stutter period calculation. Triggering a full update will
2581 		 * recalculate stutter period.
2582 		 */
2583 		update_flags->bits.dcc_change = 1;
2584 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2585 	}
2586 
2587 	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2588 			resource_pixel_format_to_bpp(u->surface->format)) {
2589 		/* different bytes per element will require full bandwidth
2590 		 * and DML calculation
2591 		 */
2592 		update_flags->bits.bpp_change = 1;
2593 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2594 	}
2595 
2596 	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2597 			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2598 		update_flags->bits.plane_size_change = 1;
2599 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2600 	}
2601 
2602 
2603 	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2604 			sizeof(struct dc_tiling_info)) != 0) {
2605 		update_flags->bits.swizzle_change = 1;
2606 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2607 
2608 		/* TODO: the checks below are HW dependent; we should add a hook to the
2609 		 * DCE/DCN resource and validate there.
2610 		 */
2611 		if (!dc->debug.skip_full_updated_if_possible) {
2612 			/* swizzled mode requires RQ to be setup properly,
2613 			 * thus need to run DML to calculate RQ settings
2614 			 */
2615 			update_flags->bits.bandwidth_change = 1;
2616 			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2617 		}
2618 	}
2619 
2620 	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
2621 	return update_type;
2622 }
2623 
2624 static enum surface_update_type get_scaling_info_update_type(
2625 		const struct dc *dc,
2626 		const struct dc_surface_update *u)
2627 {
2628 	union surface_update_flags *update_flags = &u->surface->update_flags;
2629 
2630 	if (!u->scaling_info)
2631 		return UPDATE_TYPE_FAST;
2632 
2633 	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2634 			|| u->scaling_info->src_rect.height != u->surface->src_rect.height
2635 			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2636 			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2637 			|| u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2638 			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2639 			|| u->scaling_info->scaling_quality.integer_scaling !=
2640 					u->surface->scaling_quality.integer_scaling) {
2641 		update_flags->bits.scaling_change = 1;
2642 
2643 		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2644 				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
2645 			/* Making src rect bigger requires a bandwidth change */
2646 			update_flags->bits.clock_change = 1;
2647 
2648 		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2649 			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2650 				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2651 					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2652 			/* Making dst rect smaller requires a bandwidth change */
2653 			update_flags->bits.bandwidth_change = 1;
2654 
2655 		if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2656 			(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2657 			 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2658 			 /* Changing clip size of a large surface may result in MPC slice count change */
2659 			update_flags->bits.bandwidth_change = 1;
2660 	}
2661 
2662 	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2663 			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
2664 			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2665 			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2666 			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2667 			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2668 		update_flags->bits.position_change = 1;
2669 
2670 	/* process every update flag before returning */
2671 	if (update_flags->bits.clock_change
2672 			|| update_flags->bits.bandwidth_change
2673 			|| update_flags->bits.scaling_change)
2674 		return UPDATE_TYPE_FULL;
2675 
2676 	if (update_flags->bits.position_change)
2677 		return UPDATE_TYPE_MED;
2678 
2679 	return UPDATE_TYPE_FAST;
2680 }
2681 
2682 static enum surface_update_type det_surface_update(const struct dc *dc,
2683 		const struct dc_surface_update *u)
2684 {
2685 	const struct dc_state *context = dc->current_state;
2686 	enum surface_update_type type;
2687 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2688 	union surface_update_flags *update_flags = &u->surface->update_flags;
2689 
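	/* A surface that is not in the current context (or has a forced full
	 * update) always requires a full update.
	 */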
2690 	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2691 		update_flags->raw = 0xFFFFFFFF;
2692 		return UPDATE_TYPE_FULL;
2693 	}
2694 
2695 	update_flags->raw = 0; // Reset all flags
2696 
2697 	type = get_plane_info_update_type(dc, u);
2698 	elevate_update_type(&overall_type, type);
2699 
2700 	type = get_scaling_info_update_type(dc, u);
2701 	elevate_update_type(&overall_type, type);
2702 
2703 	if (u->flip_addr) {
2704 		update_flags->bits.addr_update = 1;
2705 		if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2706 			update_flags->bits.tmz_changed = 1;
2707 			elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2708 		}
2709 	}
2710 	if (u->in_transfer_func)
2711 		update_flags->bits.in_transfer_func_change = 1;
2712 
2713 	if (u->input_csc_color_matrix)
2714 		update_flags->bits.input_csc_change = 1;
2715 
2716 	if (u->coeff_reduction_factor)
2717 		update_flags->bits.coeff_reduction_change = 1;
2718 
2719 	if (u->gamut_remap_matrix)
2720 		update_flags->bits.gamut_remap_change = 1;
2721 
2722 	if (u->blend_tf)
2723 		update_flags->bits.gamma_change = 1;
2724 
2725 	if (u->gamma) {
2726 		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2727 
2728 		if (u->plane_info)
2729 			format = u->plane_info->format;
2730 		else
2731 			format = u->surface->format;
2732 
2733 		if (dce_use_lut(format))
2734 			update_flags->bits.gamma_change = 1;
2735 	}
2736 
2737 	if (u->lut3d_func || u->func_shaper)
2738 		update_flags->bits.lut_3d = 1;
2739 
2740 	if (u->hdr_mult.value)
2741 		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2742 			update_flags->bits.hdr_mult = 1;
2743 			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2744 		}
2745 
2746 	if (u->sdr_white_level_nits)
2747 		if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
2748 			update_flags->bits.sdr_white_level_nits = 1;
2749 			elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2750 		}
2751 
2752 	if (u->cm2_params) {
2753 		if ((u->cm2_params->component_settings.shaper_3dlut_setting
2754 					!= u->surface->mcm_shaper_3dlut_setting)
2755 				|| (u->cm2_params->component_settings.lut1d_enable
2756 					!= u->surface->mcm_lut1d_enable))
2757 			update_flags->bits.mcm_transfer_function_enable_change = 1;
2758 		if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
2759 				!= u->surface->mcm_luts.lut3d_data.lut3d_src)
2760 			update_flags->bits.mcm_transfer_function_enable_change = 1;
2761 	}
2762 	if (update_flags->bits.in_transfer_func_change) {
2763 		type = UPDATE_TYPE_MED;
2764 		elevate_update_type(&overall_type, type);
2765 	}
2766 
2767 	if (update_flags->bits.lut_3d &&
2768 			u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
2769 		type = UPDATE_TYPE_FULL;
2770 		elevate_update_type(&overall_type, type);
2771 	}
2772 	if (update_flags->bits.mcm_transfer_function_enable_change) {
2773 		type = UPDATE_TYPE_FULL;
2774 		elevate_update_type(&overall_type, type);
2775 	}
2776 
2777 	if (dc->debug.enable_legacy_fast_update &&
2778 			(update_flags->bits.gamma_change ||
2779 			update_flags->bits.gamut_remap_change ||
2780 			update_flags->bits.input_csc_change ||
2781 			update_flags->bits.coeff_reduction_change)) {
2782 		type = UPDATE_TYPE_FULL;
2783 		elevate_update_type(&overall_type, type);
2784 	}
2785 	return overall_type;
2786 }
2787 
2788 /* May need to flip the desktop plane in cases where the MPO plane receives a flip but the desktop plane
2789  * doesn't, while both planes are flip_immediate.
2790  */
2791 static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count)
2792 {
2793 	bool has_flip_immediate_plane = false;
2794 	int i;
2795 
2796 	for (i = 0; i < surface_count; i++) {
2797 		if (updates[i].surface->flip_immediate) {
2798 			has_flip_immediate_plane = true;
2799 			break;
2800 		}
2801 	}
2802 
2803 	if (has_flip_immediate_plane && surface_count > 1) {
2804 		for (i = 0; i < surface_count; i++) {
2805 			if (updates[i].surface->flip_immediate)
2806 				updates[i].surface->update_flags.bits.addr_update = 1;
2807 		}
2808 	}
2809 }
2810 
2811 static enum surface_update_type check_update_surfaces_for_stream(
2812 		struct dc *dc,
2813 		struct dc_surface_update *updates,
2814 		int surface_count,
2815 		struct dc_stream_update *stream_update,
2816 		const struct dc_stream_status *stream_status)
2817 {
2818 	int i;
2819 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2820 
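	/* Start from FAST and elevate the type based on stream- and surface-level changes */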
2821 	if (dc->idle_optimizations_allowed)
2822 		overall_type = UPDATE_TYPE_FULL;
2823 
2824 	if (stream_status == NULL || stream_status->plane_count != surface_count)
2825 		overall_type = UPDATE_TYPE_FULL;
2826 
2827 	if (stream_update && stream_update->pending_test_pattern) {
2828 		overall_type = UPDATE_TYPE_FULL;
2829 	}
2830 
2831 	if (stream_update && stream_update->hw_cursor_req) {
2832 		overall_type = UPDATE_TYPE_FULL;
2833 	}
2834 
2835 	/* some stream updates require passive update */
2836 	if (stream_update) {
2837 		union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2838 
2839 		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2840 			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2841 			stream_update->integer_scaling_update)
2842 			su_flags->bits.scaling = 1;
2843 
2844 		if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2845 			su_flags->bits.out_tf = 1;
2846 
2847 		if (stream_update->abm_level)
2848 			su_flags->bits.abm_level = 1;
2849 
2850 		if (stream_update->dpms_off)
2851 			su_flags->bits.dpms_off = 1;
2852 
2853 		if (stream_update->gamut_remap)
2854 			su_flags->bits.gamut_remap = 1;
2855 
2856 		if (stream_update->wb_update)
2857 			su_flags->bits.wb_update = 1;
2858 
2859 		if (stream_update->dsc_config)
2860 			su_flags->bits.dsc_changed = 1;
2861 
2862 		if (stream_update->mst_bw_update)
2863 			su_flags->bits.mst_bw = 1;
2864 
2865 		if (stream_update->stream->freesync_on_desktop &&
2866 			(stream_update->vrr_infopacket || stream_update->allow_freesync ||
2867 				stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2868 			su_flags->bits.fams_changed = 1;
2869 
2870 		if (stream_update->scaler_sharpener_update)
2871 			su_flags->bits.scaler_sharpener = 1;
2872 
2873 		if (stream_update->sharpening_required)
2874 			su_flags->bits.sharpening_required = 1;
2875 
2876 		if (stream_update->output_color_space)
2877 			su_flags->bits.out_csc = 1;
2878 
2879 		if (su_flags->raw != 0)
2880 			overall_type = UPDATE_TYPE_FULL;
2881 
2882 		if (stream_update->output_csc_transform)
2883 			su_flags->bits.out_csc = 1;
2884 
2885 		/* Output transfer function changes do not require bandwidth recalculation,
2886 		 * so don't trigger a full update
2887 		 */
2888 		if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2889 			su_flags->bits.out_tf = 1;
2890 	}
2891 
2892 	for (i = 0 ; i < surface_count; i++) {
2893 		enum surface_update_type type =
2894 				det_surface_update(dc, &updates[i]);
2895 
2896 		elevate_update_type(&overall_type, type);
2897 	}
2898 
2899 	return overall_type;
2900 }
2901 
2902 /*
2903  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2904  *
2905  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2906  */
2907 enum surface_update_type dc_check_update_surfaces_for_stream(
2908 		struct dc *dc,
2909 		struct dc_surface_update *updates,
2910 		int surface_count,
2911 		struct dc_stream_update *stream_update,
2912 		const struct dc_stream_status *stream_status)
2913 {
2914 	int i;
2915 	enum surface_update_type type;
2916 
2917 	if (stream_update)
2918 		stream_update->stream->update_flags.raw = 0;
2919 	for (i = 0; i < surface_count; i++)
2920 		updates[i].surface->update_flags.raw = 0;
2921 
2922 	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2923 	if (type == UPDATE_TYPE_FULL) {
2924 		if (stream_update) {
2925 			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2926 			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2927 			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2928 		}
2929 		for (i = 0; i < surface_count; i++)
2930 			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2931 	}
2932 
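	/* For fast updates, still mark that a bandwidth re-optimization is
	 * required when the current clocks no longer match the committed state.
	 */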
2933 	if (type == UPDATE_TYPE_FAST) {
2934 		// If there's an available clock comparator, we use that.
2935 		if (dc->clk_mgr->funcs->are_clock_states_equal) {
2936 			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2937 				dc->optimized_required = true;
2938 		// Else we fall back to a mem compare.
2939 		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2940 			dc->optimized_required = true;
2941 		}
2942 
2943 		dc->optimized_required |= dc->wm_optimized_required;
2944 	}
2945 
2946 	return type;
2947 }
2948 
2949 static struct dc_stream_status *stream_get_status(
2950 	struct dc_state *ctx,
2951 	struct dc_stream_state *stream)
2952 {
2953 	uint8_t i;
2954 
2955 	for (i = 0; i < ctx->stream_count; i++) {
2956 		if (stream == ctx->streams[i]) {
2957 			return &ctx->stream_status[i];
2958 		}
2959 	}
2960 
2961 	return NULL;
2962 }
2963 
2964 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2965 
2966 static void copy_surface_update_to_plane(
2967 		struct dc_plane_state *surface,
2968 		struct dc_surface_update *srf_update)
2969 {
2970 	if (srf_update->flip_addr) {
2971 		surface->address = srf_update->flip_addr->address;
2972 		surface->flip_immediate =
2973 			srf_update->flip_addr->flip_immediate;
2974 		surface->time.time_elapsed_in_us[surface->time.index] =
2975 			srf_update->flip_addr->flip_timestamp_in_us -
2976 				surface->time.prev_update_time_in_us;
2977 		surface->time.prev_update_time_in_us =
2978 			srf_update->flip_addr->flip_timestamp_in_us;
2979 		surface->time.index++;
2980 		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2981 			surface->time.index = 0;
2982 
2983 		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2984 	}
2985 
2986 	if (srf_update->scaling_info) {
2987 		surface->scaling_quality =
2988 				srf_update->scaling_info->scaling_quality;
2989 		surface->dst_rect =
2990 				srf_update->scaling_info->dst_rect;
2991 		surface->src_rect =
2992 				srf_update->scaling_info->src_rect;
2993 		surface->clip_rect =
2994 				srf_update->scaling_info->clip_rect;
2995 	}
2996 
2997 	if (srf_update->plane_info) {
2998 		surface->color_space =
2999 				srf_update->plane_info->color_space;
3000 		surface->format =
3001 				srf_update->plane_info->format;
3002 		surface->plane_size =
3003 				srf_update->plane_info->plane_size;
3004 		surface->rotation =
3005 				srf_update->plane_info->rotation;
3006 		surface->horizontal_mirror =
3007 				srf_update->plane_info->horizontal_mirror;
3008 		surface->stereo_format =
3009 				srf_update->plane_info->stereo_format;
3010 		surface->tiling_info =
3011 				srf_update->plane_info->tiling_info;
3012 		surface->visible =
3013 				srf_update->plane_info->visible;
3014 		surface->per_pixel_alpha =
3015 				srf_update->plane_info->per_pixel_alpha;
3016 		surface->global_alpha =
3017 				srf_update->plane_info->global_alpha;
3018 		surface->global_alpha_value =
3019 				srf_update->plane_info->global_alpha_value;
3020 		surface->dcc =
3021 				srf_update->plane_info->dcc;
3022 		surface->layer_index =
3023 				srf_update->plane_info->layer_index;
3024 	}
3025 
3026 	if (srf_update->gamma) {
3027 		memcpy(&surface->gamma_correction.entries,
3028 			&srf_update->gamma->entries,
3029 			sizeof(struct dc_gamma_entries));
3030 		surface->gamma_correction.is_identity =
3031 			srf_update->gamma->is_identity;
3032 		surface->gamma_correction.num_entries =
3033 			srf_update->gamma->num_entries;
3034 		surface->gamma_correction.type =
3035 			srf_update->gamma->type;
3036 	}
3037 
3038 	if (srf_update->in_transfer_func) {
3039 		surface->in_transfer_func.sdr_ref_white_level =
3040 			srf_update->in_transfer_func->sdr_ref_white_level;
3041 		surface->in_transfer_func.tf =
3042 			srf_update->in_transfer_func->tf;
3043 		surface->in_transfer_func.type =
3044 			srf_update->in_transfer_func->type;
3045 		memcpy(&surface->in_transfer_func.tf_pts,
3046 			&srf_update->in_transfer_func->tf_pts,
3047 			sizeof(struct dc_transfer_func_distributed_points));
3048 	}
3049 
3050 	if (srf_update->cm2_params) {
3051 		surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
3052 		surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
3053 		surface->mcm_luts = srf_update->cm2_params->cm2_luts;
3054 	}
3055 
3056 	if (srf_update->func_shaper) {
3057 		memcpy(&surface->in_shaper_func, srf_update->func_shaper,
3058 		sizeof(surface->in_shaper_func));
3059 
3060 		if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER)
3061 			surface->mcm_luts.shaper = &surface->in_shaper_func;
3062 	}
3063 
3064 	if (srf_update->lut3d_func)
3065 		memcpy(&surface->lut3d_func, srf_update->lut3d_func,
3066 		sizeof(surface->lut3d_func));
3067 
3068 	if (srf_update->hdr_mult.value)
3069 		surface->hdr_mult =
3070 				srf_update->hdr_mult;
3071 
3072 	if (srf_update->sdr_white_level_nits)
3073 		surface->sdr_white_level_nits =
3074 				srf_update->sdr_white_level_nits;
3075 
3076 	if (srf_update->blend_tf) {
3077 		memcpy(&surface->blend_tf, srf_update->blend_tf,
3078 		sizeof(surface->blend_tf));
3079 
3080 		if (surface->mcm_lut1d_enable)
3081 			surface->mcm_luts.lut1d_func = &surface->blend_tf;
3082 	}
3083 
3084 	if (srf_update->cm2_params || srf_update->blend_tf)
3085 		surface->lut_bank_a = !surface->lut_bank_a;
3086 
3087 	if (srf_update->input_csc_color_matrix)
3088 		surface->input_csc_color_matrix =
3089 			*srf_update->input_csc_color_matrix;
3090 
3091 	if (srf_update->coeff_reduction_factor)
3092 		surface->coeff_reduction_factor =
3093 			*srf_update->coeff_reduction_factor;
3094 
3095 	if (srf_update->gamut_remap_matrix)
3096 		surface->gamut_remap_matrix =
3097 			*srf_update->gamut_remap_matrix;
3098 
3099 	if (srf_update->cursor_csc_color_matrix)
3100 		surface->cursor_csc_color_matrix =
3101 			*srf_update->cursor_csc_color_matrix;
3102 
3103 	if (srf_update->bias_and_scale.bias_and_scale_valid)
3104 			surface->bias_and_scale =
3105 					srf_update->bias_and_scale;
3106 }
3107 
3108 static void copy_stream_update_to_stream(struct dc *dc,
3109 					 struct dc_state *context,
3110 					 struct dc_stream_state *stream,
3111 					 struct dc_stream_update *update)
3112 {
3113 	struct dc_context *dc_ctx = dc->ctx;
3114 
3115 	if (update == NULL || stream == NULL)
3116 		return;
3117 
3118 	if (update->src.height && update->src.width)
3119 		stream->src = update->src;
3120 
3121 	if (update->dst.height && update->dst.width)
3122 		stream->dst = update->dst;
3123 
3124 	if (update->out_transfer_func) {
3125 		stream->out_transfer_func.sdr_ref_white_level =
3126 			update->out_transfer_func->sdr_ref_white_level;
3127 		stream->out_transfer_func.tf = update->out_transfer_func->tf;
3128 		stream->out_transfer_func.type =
3129 			update->out_transfer_func->type;
3130 		memcpy(&stream->out_transfer_func.tf_pts,
3131 		       &update->out_transfer_func->tf_pts,
3132 		       sizeof(struct dc_transfer_func_distributed_points));
3133 	}
3134 
3135 	if (update->hdr_static_metadata)
3136 		stream->hdr_static_metadata = *update->hdr_static_metadata;
3137 
3138 	if (update->abm_level)
3139 		stream->abm_level = *update->abm_level;
3140 
3141 	if (update->periodic_interrupt)
3142 		stream->periodic_interrupt = *update->periodic_interrupt;
3143 
3144 	if (update->gamut_remap)
3145 		stream->gamut_remap_matrix = *update->gamut_remap;
3146 
3147 	/* Note: updating this after mode set is currently not a use case;
3148 	 * however, if it arises, OCSC would need to be reprogrammed at a
3149 	 * minimum.
3150 	 */
3151 	if (update->output_color_space)
3152 		stream->output_color_space = *update->output_color_space;
3153 
3154 	if (update->output_csc_transform)
3155 		stream->csc_color_matrix = *update->output_csc_transform;
3156 
3157 	if (update->vrr_infopacket)
3158 		stream->vrr_infopacket = *update->vrr_infopacket;
3159 
3160 	if (update->hw_cursor_req)
3161 		stream->hw_cursor_req = *update->hw_cursor_req;
3162 
3163 	if (update->allow_freesync)
3164 		stream->allow_freesync = *update->allow_freesync;
3165 
3166 	if (update->vrr_active_variable)
3167 		stream->vrr_active_variable = *update->vrr_active_variable;
3168 
3169 	if (update->vrr_active_fixed)
3170 		stream->vrr_active_fixed = *update->vrr_active_fixed;
3171 
3172 	if (update->crtc_timing_adjust) {
3173 		if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
3174 			stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
3175 			stream->adjust.timing_adjust_pending)
3176 			update->crtc_timing_adjust->timing_adjust_pending = true;
3177 		stream->adjust = *update->crtc_timing_adjust;
3178 		update->crtc_timing_adjust->timing_adjust_pending = false;
3179 	}
3180 
3181 	if (update->dpms_off)
3182 		stream->dpms_off = *update->dpms_off;
3183 
3184 	if (update->hfvsif_infopacket)
3185 		stream->hfvsif_infopacket = *update->hfvsif_infopacket;
3186 
3187 	if (update->vtem_infopacket)
3188 		stream->vtem_infopacket = *update->vtem_infopacket;
3189 
3190 	if (update->vsc_infopacket)
3191 		stream->vsc_infopacket = *update->vsc_infopacket;
3192 
3193 	if (update->vsp_infopacket)
3194 		stream->vsp_infopacket = *update->vsp_infopacket;
3195 
3196 	if (update->adaptive_sync_infopacket)
3197 		stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
3198 
3199 	if (update->dither_option)
3200 		stream->dither_option = *update->dither_option;
3201 
3202 	if (update->pending_test_pattern)
3203 		stream->test_pattern = *update->pending_test_pattern;
3204 	/* update current stream with writeback info */
3205 	if (update->wb_update) {
3206 		int i;
3207 
3208 		stream->num_wb_info = update->wb_update->num_wb_info;
3209 		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
3210 		for (i = 0; i < stream->num_wb_info; i++)
3211 			stream->writeback_info[i] =
3212 				update->wb_update->writeback_info[i];
3213 	}
3214 	if (update->dsc_config) {
3215 		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
3216 		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
3217 		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
3218 				       update->dsc_config->num_slices_v != 0);
3219 
3220 		/* Use temporary context for validating new DSC config */
3221 		struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
3222 
3223 		if (dsc_validate_context) {
3224 			stream->timing.dsc_cfg = *update->dsc_config;
3225 			stream->timing.flags.DSC = enable_dsc;
3226 			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
3227 				stream->timing.dsc_cfg = old_dsc_cfg;
3228 				stream->timing.flags.DSC = old_dsc_enabled;
3229 				update->dsc_config = NULL;
3230 			}
3231 
3232 			dc_state_release(dsc_validate_context);
3233 		} else {
3234 			DC_ERROR("Failed to allocate new validate context for DSC change\n");
3235 			update->dsc_config = NULL;
3236 		}
3237 	}
3238 	if (update->scaler_sharpener_update)
3239 		stream->scaler_sharpener_update = *update->scaler_sharpener_update;
3240 	if (update->sharpening_required)
3241 		stream->sharpening_required = *update->sharpening_required;
3242 }
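
/*
 * Illustrative sketch (not from the source): stream updates follow the same
 * optional-pointer convention as surface updates, so e.g. a DPMS change only
 * needs the dpms_off member populated before the update is submitted through
 * a public entry point such as dc_update_planes_and_stream().  'my_stream',
 * 'srf_updates' and 'surface_count' are hypothetical caller-provided names.
 *
 *	bool dpms_off = true;
 *	struct dc_stream_update stream_update = {0};
 *
 *	stream_update.dpms_off = &dpms_off;
 *	// members left NULL above are ignored by the copy helper
 *	dc_update_planes_and_stream(dc, srf_updates, surface_count,
 *				    my_stream, &stream_update);
 */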
3243 
3244 static void backup_planes_and_stream_state(
3245 		struct dc_scratch_space *scratch,
3246 		struct dc_stream_state *stream)
3247 {
3248 	int i;
3249 	struct dc_stream_status *status = dc_stream_get_status(stream);
3250 
3251 	if (!status)
3252 		return;
3253 
3254 	for (i = 0; i < status->plane_count; i++) {
3255 		scratch->plane_states[i] = *status->plane_states[i];
3256 	}
3257 	scratch->stream_state = *stream;
3258 }
3259 
3260 static void restore_planes_and_stream_state(
3261 		struct dc_scratch_space *scratch,
3262 		struct dc_stream_state *stream)
3263 {
3264 	int i;
3265 	struct dc_stream_status *status = dc_stream_get_status(stream);
3266 
3267 	if (!status)
3268 		return;
3269 
3270 	for (i = 0; i < status->plane_count; i++) {
3271 		/* refcount will always be valid, restore everything else */
3272 		struct kref refcount = status->plane_states[i]->refcount;
3273 		*status->plane_states[i] = scratch->plane_states[i];
3274 		status->plane_states[i]->refcount = refcount;
3275 	}
3276 	*stream = scratch->stream_state;
3277 }
3278 
3279 /**
3280  * update_seamless_boot_flags() - Helper function for updating seamless boot flags
3281  *
3282  * @dc: Current DC state
3283  * @context: New DC state to be programmed
3284  * @surface_count: Number of surfaces that have an update
3285  * @stream: Corresponding stream to be updated in the current flip
3286  *
3287  * Updating seamless boot flags does not need to be part of the commit sequence. This
3288  * helper function will update the seamless boot flags on each flip (if required)
3289  * outside of the HW commit sequence (fast or slow).
3290  *
3291  * Return: void
3292  */
3293 static void update_seamless_boot_flags(struct dc *dc,
3294 		struct dc_state *context,
3295 		int surface_count,
3296 		struct dc_stream_state *stream)
3297 {
3298 	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3299 		/* The seamless boot optimization flag keeps clocks and watermarks
3300 		 * high until the first flip. After the first flip, optimization is
3301 		 * required to lower bandwidth. Note that UEFI is expected to light
3302 		 * up only a single display on POST, therefore we only expect one
3303 		 * stream with the seamless boot flag set.
3304 		 */
3305 		if (stream->apply_seamless_boot_optimization) {
3306 			stream->apply_seamless_boot_optimization = false;
3307 
3308 			if (get_seamless_boot_stream_count(context) == 0)
3309 				dc->optimized_required = true;
3310 		}
3311 	}
3312 }
3313 
3314 /**
3315  * update_planes_and_stream_state() - The function takes planes and stream
3316  * updates as inputs and determines the appropriate update type. If update type
3317  * is FULL, the function allocates a new context, populates and validates it.
3318  * Otherwise, it updates current dc context. The function will return both
3319  * new_context and new_update_type back to the caller. The function also backs
3320  * up both current and new contexts into corresponding dc state scratch memory.
3321  * TODO: The function does too many things, and even conditionally allocates dc
3322  * context memory implicitly. We should consider breaking it down.
3323  *
3324  * @dc: Current DC state
3325  * @srf_updates: an array of surface updates
3326  * @surface_count: surface update count
3327  * @stream: Corresponding stream to be updated
3328  * @stream_update: stream update
3329  * @new_update_type: [out] determined update type by the function
3330  * @new_context: [out] new context allocated and validated if update type is
3331  * FULL, reference to current context if update type is less than FULL.
3332  *
3333  * Return: true if a valid update is populated into new_context, false
3334  * otherwise.
3335  */
3336 static bool update_planes_and_stream_state(struct dc *dc,
3337 		struct dc_surface_update *srf_updates, int surface_count,
3338 		struct dc_stream_state *stream,
3339 		struct dc_stream_update *stream_update,
3340 		enum surface_update_type *new_update_type,
3341 		struct dc_state **new_context)
3342 {
3343 	struct dc_state *context;
3344 	int i, j;
3345 	enum surface_update_type update_type;
3346 	const struct dc_stream_status *stream_status;
3347 	struct dc_context *dc_ctx = dc->ctx;
3348 
3349 	stream_status = dc_stream_get_status(stream);
3350 
3351 	if (!stream_status) {
3352 		if (surface_count) /* Only an error condition if surf_count non-zero*/
3353 			ASSERT(false);
3354 
3355 		return false; /* Cannot commit surface to stream that is not committed */
3356 	}
3357 
3358 	context = dc->current_state;
3359 	update_type = dc_check_update_surfaces_for_stream(
3360 			dc, srf_updates, surface_count, stream_update, stream_status);
3361 	/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
3362 	 * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
3363 	 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
3364 	 */
3365 	force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
3366 	if (update_type == UPDATE_TYPE_FULL)
3367 		backup_planes_and_stream_state(&dc->scratch.current_state, stream);
3368 
3369 	/* update current stream with the new updates */
3370 	copy_stream_update_to_stream(dc, context, stream, stream_update);
3371 
3372 	/* do not perform surface update if surface has invalid dimensions
3373 	 * (all zero) and no scaling_info is provided
3374 	 */
3375 	if (surface_count > 0) {
3376 		for (i = 0; i < surface_count; i++) {
3377 			if ((srf_updates[i].surface->src_rect.width == 0 ||
3378 				 srf_updates[i].surface->src_rect.height == 0 ||
3379 				 srf_updates[i].surface->dst_rect.width == 0 ||
3380 				 srf_updates[i].surface->dst_rect.height == 0) &&
3381 				(!srf_updates[i].scaling_info ||
3382 				  srf_updates[i].scaling_info->src_rect.width == 0 ||
3383 				  srf_updates[i].scaling_info->src_rect.height == 0 ||
3384 				  srf_updates[i].scaling_info->dst_rect.width == 0 ||
3385 				  srf_updates[i].scaling_info->dst_rect.height == 0)) {
3386 				DC_ERROR("Invalid src/dst rects in surface update!\n");
3387 				return false;
3388 			}
3389 		}
3390 	}
3391 
3392 	if (update_type >= update_surface_trace_level)
3393 		update_surface_trace(dc, srf_updates, surface_count);
3394 
3395 	for (i = 0; i < surface_count; i++)
3396 		copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3397 
3398 	if (update_type >= UPDATE_TYPE_FULL) {
3399 		struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3400 
3401 		for (i = 0; i < surface_count; i++)
3402 			new_planes[i] = srf_updates[i].surface;
3403 
3404 		/* initialize scratch memory for building context */
3405 		context = dc_state_create_copy(dc->current_state);
3406 		if (context == NULL) {
3407 			DC_ERROR("Failed to allocate new validate context!\n");
3408 			return false;
3409 		}
3410 
3411 		/* For each full update, remove all existing phantom pipes first.
3412 		 * Ensures that we have enough pipes for newly added MPO planes
3413 		 */
3414 		dc_state_remove_phantom_streams_and_planes(dc, context);
3415 		dc_state_release_phantom_streams_and_planes(dc, context);
3416 
3417 		/*remove old surfaces from context */
3418 		if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3419 
3420 			BREAK_TO_DEBUGGER();
3421 			goto fail;
3422 		}
3423 
3424 		/* add surface to context */
3425 		if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3426 
3427 			BREAK_TO_DEBUGGER();
3428 			goto fail;
3429 		}
3430 	}
3431 
3432 	/* save update parameters into surface */
3433 	for (i = 0; i < surface_count; i++) {
3434 		struct dc_plane_state *surface = srf_updates[i].surface;
3435 
3436 		if (update_type != UPDATE_TYPE_MED)
3437 			continue;
3438 		if (surface->update_flags.bits.position_change) {
3439 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
3440 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3441 
3442 				if (pipe_ctx->plane_state != surface)
3443 					continue;
3444 
3445 				resource_build_scaling_params(pipe_ctx);
3446 			}
3447 		}
3448 	}
3449 
3450 	if (update_type == UPDATE_TYPE_FULL) {
3451 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3452 			BREAK_TO_DEBUGGER();
3453 			goto fail;
3454 		}
3455 	}
3456 	update_seamless_boot_flags(dc, context, surface_count, stream);
3457 
3458 	*new_context = context;
3459 	*new_update_type = update_type;
3460 	if (update_type == UPDATE_TYPE_FULL)
3461 		backup_planes_and_stream_state(&dc->scratch.new_state, stream);
3462 
3463 	return true;
3464 
3465 fail:
3466 	dc_state_release(context);
3467 
3468 	return false;
3469 
3470 }
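
/*
 * Rough usage sketch (illustrative, not the actual caller): the two out
 * parameters are typically consumed by committing the populated context and,
 * for a FULL update, swapping it in place of dc->current_state afterwards.
 *
 *	enum surface_update_type update_type;
 *	struct dc_state *context;
 *
 *	if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
 *					    stream, stream_update,
 *					    &update_type, &context))
 *		return false;
 *	commit_planes_for_stream(dc, srf_updates, surface_count, stream,
 *				 stream_update, update_type, context);
 *	if (update_type == UPDATE_TYPE_FULL)
 *		swap_and_release_current_context(dc, context, stream);
 */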
3471 
3472 static void commit_planes_do_stream_update(struct dc *dc,
3473 		struct dc_stream_state *stream,
3474 		struct dc_stream_update *stream_update,
3475 		enum surface_update_type update_type,
3476 		struct dc_state *context)
3477 {
3478 	int j;
3479 
3480 	// Stream updates
3481 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
3482 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3483 
3484 		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3485 
3486 			if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3487 				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3488 
3489 			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3490 					stream_update->vrr_infopacket ||
3491 					stream_update->vsc_infopacket ||
3492 					stream_update->vsp_infopacket ||
3493 					stream_update->hfvsif_infopacket ||
3494 					stream_update->adaptive_sync_infopacket ||
3495 					stream_update->vtem_infopacket) {
3496 				resource_build_info_frame(pipe_ctx);
3497 				dc->hwss.update_info_frame(pipe_ctx);
3498 
3499 				if (dc_is_dp_signal(pipe_ctx->stream->signal))
3500 					dc->link_srv->dp_trace_source_sequence(
3501 							pipe_ctx->stream->link,
3502 							DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3503 			}
3504 
3505 			if (stream_update->hdr_static_metadata &&
3506 					stream->use_dynamic_meta &&
3507 					dc->hwss.set_dmdata_attributes &&
3508 					pipe_ctx->stream->dmdata_address.quad_part != 0)
3509 				dc->hwss.set_dmdata_attributes(pipe_ctx);
3510 
3511 			if (stream_update->gamut_remap)
3512 				dc_stream_set_gamut_remap(dc, stream);
3513 
3514 			if (stream_update->output_csc_transform)
3515 				dc_stream_program_csc_matrix(dc, stream);
3516 
3517 			if (stream_update->dither_option) {
3518 				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3519 				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3520 									&pipe_ctx->stream->bit_depth_params);
3521 				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3522 						&stream->bit_depth_params,
3523 						&stream->clamping);
3524 				while (odm_pipe) {
3525 					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3526 							&stream->bit_depth_params,
3527 							&stream->clamping);
3528 					odm_pipe = odm_pipe->next_odm_pipe;
3529 				}
3530 			}
3531 
3532 			if (stream_update->cursor_attributes)
3533 				program_cursor_attributes(dc, stream);
3534 
3535 			if (stream_update->cursor_position)
3536 				program_cursor_position(dc, stream);
3537 
3538 			/* Full fe update*/
3539 			if (update_type == UPDATE_TYPE_FAST)
3540 				continue;
3541 
3542 			if (stream_update->dsc_config)
3543 				dc->link_srv->update_dsc_config(pipe_ctx);
3544 
3545 			if (stream_update->mst_bw_update) {
3546 				if (stream_update->mst_bw_update->is_increase)
3547 					dc->link_srv->increase_mst_payload(pipe_ctx,
3548 							stream_update->mst_bw_update->mst_stream_bw);
3549 				else
3550 					dc->link_srv->reduce_mst_payload(pipe_ctx,
3551 							stream_update->mst_bw_update->mst_stream_bw);
3552 			}
3553 
3554 			if (stream_update->pending_test_pattern) {
3555 				/*
3556 				 * Test pattern params depend on ODM topology
3557 				 * changes that we could be applying to the
3558 				 * front end. Since front end changes are not
3559 				 * yet applied at this stage, we can only apply
3560 				 * the test pattern in hw based on the current
3561 				 * state and populate the final test pattern
3562 				 * params in the new state. If current and new
3563 				 * test pattern params differ as a result of a
3564 				 * different ODM topology being used, this will
3565 				 * be detected and handled during the front end
3566 				 * programming update.
3567 				 */
3568 				dc->link_srv->dp_set_test_pattern(stream->link,
3569 					stream->test_pattern.type,
3570 					stream->test_pattern.color_space,
3571 					stream->test_pattern.p_link_settings,
3572 					stream->test_pattern.p_custom_pattern,
3573 					stream->test_pattern.cust_pattern_size);
3574 				resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
3575 			}
3576 
3577 			if (stream_update->dpms_off) {
3578 				if (*stream_update->dpms_off) {
3579 					dc->link_srv->set_dpms_off(pipe_ctx);
3580 					/* for dpms, keep acquired resources*/
3581 					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3582 						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3583 
3584 					dc->optimized_required = true;
3585 
3586 				} else {
3587 					if (get_seamless_boot_stream_count(context) == 0)
3588 						dc->hwss.prepare_bandwidth(dc, dc->current_state);
3589 					dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3590 				}
3591 			} else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3592 					&& !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3593 				/*
3594 				 * Workaround for firmware issue in some receivers where they don't pick up
3595 				 * correct output color space unless DP link is disabled/re-enabled
3596 				 */
3597 				dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3598 			}
3599 
3600 			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3601 				bool should_program_abm = true;
3602 
3603 				// if otg funcs defined check if blanked before programming
3604 				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3605 					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3606 						should_program_abm = false;
3607 
3608 				if (should_program_abm) {
3609 					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3610 						dc->hwss.set_abm_immediate_disable(pipe_ctx);
3611 					} else {
3612 						pipe_ctx->stream_res.abm->funcs->set_abm_level(
3613 							pipe_ctx->stream_res.abm, stream->abm_level);
3614 					}
3615 				}
3616 			}
3617 		}
3618 	}
3619 }
3620 
3621 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3622 {
3623 	if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3624 			|| stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3625 			&& stream->ctx->dce_version >= DCN_VERSION_3_1)
3626 		return true;
3627 
3628 	if (stream->link->replay_settings.config.replay_supported)
3629 		return true;
3630 
3631 	if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
3632 		return true;
3633 
3634 	return false;
3635 }
3636 
3637 void dc_dmub_update_dirty_rect(struct dc *dc,
3638 			       int surface_count,
3639 			       struct dc_stream_state *stream,
3640 			       struct dc_surface_update *srf_updates,
3641 			       struct dc_state *context)
3642 {
3643 	union dmub_rb_cmd cmd;
3644 	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3645 	unsigned int i, j;
3646 	unsigned int panel_inst = 0;
3647 
3648 	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3649 		return;
3650 
3651 	if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3652 		return;
3653 
3654 	memset(&cmd, 0x0, sizeof(cmd));
3655 	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3656 	cmd.update_dirty_rect.header.sub_type = 0;
3657 	cmd.update_dirty_rect.header.payload_bytes =
3658 		sizeof(cmd.update_dirty_rect) -
3659 		sizeof(cmd.update_dirty_rect.header);
3660 	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3661 	for (i = 0; i < surface_count; i++) {
3662 		struct dc_plane_state *plane_state = srf_updates[i].surface;
3663 		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3664 
3665 		if (!srf_updates[i].surface || !flip_addr)
3666 			continue;
3667 		/* Do not send in immediate flip mode */
3668 		if (srf_updates[i].surface->flip_immediate)
3669 			continue;
3670 
3671 		update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3672 		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3673 		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3674 				sizeof(flip_addr->dirty_rects));
3675 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
3676 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3677 
3678 			if (pipe_ctx->stream != stream)
3679 				continue;
3680 			if (pipe_ctx->plane_state != plane_state)
3681 				continue;
3682 
3683 			update_dirty_rect->panel_inst = panel_inst;
3684 			update_dirty_rect->pipe_idx = j;
3685 			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3686 		}
3687 	}
3688 }
3689 
3690 static void build_dmub_update_dirty_rect(
3691 		struct dc *dc,
3692 		int surface_count,
3693 		struct dc_stream_state *stream,
3694 		struct dc_surface_update *srf_updates,
3695 		struct dc_state *context,
3696 		struct dc_dmub_cmd dc_dmub_cmd[],
3697 		unsigned int *dmub_cmd_count)
3698 {
3699 	union dmub_rb_cmd cmd;
3700 	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3701 	unsigned int i, j;
3702 	unsigned int panel_inst = 0;
3703 
3704 	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3705 		return;
3706 
3707 	if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3708 		return;
3709 
3710 	memset(&cmd, 0x0, sizeof(cmd));
3711 	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3712 	cmd.update_dirty_rect.header.sub_type = 0;
3713 	cmd.update_dirty_rect.header.payload_bytes =
3714 		sizeof(cmd.update_dirty_rect) -
3715 		sizeof(cmd.update_dirty_rect.header);
3716 	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3717 	for (i = 0; i < surface_count; i++) {
3718 		struct dc_plane_state *plane_state = srf_updates[i].surface;
3719 		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3720 
3721 		if (!srf_updates[i].surface || !flip_addr)
3722 			continue;
3723 		/* Do not send in immediate flip mode */
3724 		if (srf_updates[i].surface->flip_immediate)
3725 			continue;
3726 		update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3727 		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3728 		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3729 				sizeof(flip_addr->dirty_rects));
3730 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
3731 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3732 
3733 			if (pipe_ctx->stream != stream)
3734 				continue;
3735 			if (pipe_ctx->plane_state != plane_state)
3736 				continue;
3737 			update_dirty_rect->panel_inst = panel_inst;
3738 			update_dirty_rect->pipe_idx = j;
3739 			dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3740 			dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3741 			(*dmub_cmd_count)++;
3742 		}
3743 	}
3744 }
3745 
3746 static bool check_address_only_update(union surface_update_flags update_flags)
3747 {
3748 	union surface_update_flags addr_only_update_flags;
3749 	addr_only_update_flags.raw = 0;
3750 	addr_only_update_flags.bits.addr_update = 1;
3751 
3752 	return update_flags.bits.addr_update &&
3753 			!(update_flags.raw & ~addr_only_update_flags.raw);
3754 }
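
/*
 * Worked example (illustrative, not from the source): with only the
 * addr_update bit set, the raw flags equal the address-only mask and the
 * helper returns true; once any other bit (e.g. position_change) is also
 * set, the masked test fails and the flip can no longer be treated as
 * address-only.
 *
 *	union surface_update_flags f = { .raw = 0 };
 *
 *	f.bits.addr_update = 1;
 *	check_address_only_update(f);	// true
 *	f.bits.position_change = 1;
 *	check_address_only_update(f);	// false
 */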
3755 
3756 /**
3757  * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3758  *
3759  * @dc: Current DC state
3760  * @srf_updates: Array of surface updates
3761  * @surface_count: Number of surfaces that have an update
3762  * @stream: Corresponding stream to be updated in the current flip
3763  * @context: New DC state to be programmed
3764  *
3765  * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3766  * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3767  *
3768  * This function builds an array of DMCUB commands so that they can be sent
3769  * while the OTG lock is acquired.
3770  *
3771  * Return: void
3772  */
3773 static void build_dmub_cmd_list(struct dc *dc,
3774 		struct dc_surface_update *srf_updates,
3775 		int surface_count,
3776 		struct dc_stream_state *stream,
3777 		struct dc_state *context,
3778 		struct dc_dmub_cmd dc_dmub_cmd[],
3779 		unsigned int *dmub_cmd_count)
3780 {
3781 	// Initialize cmd count to 0
3782 	*dmub_cmd_count = 0;
3783 	build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3784 }
3785 
3786 static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc,
3787 		struct dc_surface_update *srf_updates,
3788 		int surface_count,
3789 		struct dc_stream_state *stream,
3790 		struct dc_state *context)
3791 {
3792 	int i, j;
3793 
3794 	/* update dirty rect for PSR */
3795 	dc_dmub_update_dirty_rect(dc, surface_count, stream,
3796 			srf_updates, context);
3797 
3798 	/* Perform requested Updates */
3799 	for (i = 0; i < surface_count; i++) {
3800 		struct dc_plane_state *plane_state = srf_updates[i].surface;
3801 
3802 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
3803 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3804 
3805 			if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3806 				continue;
3807 
3808 			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3809 				continue;
3810 
3811 			/* update pipe context for plane */
3812 			if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3813 				dc->hwss.update_plane_addr(dc, pipe_ctx);
3814 		}
3815 	}
3816 
3817 	/* Send commands to DMCUB */
3818 	dc_dmub_srv_fams2_passthrough_flip(dc,
3819 				context,
3820 				stream,
3821 				srf_updates,
3822 				surface_count);
3823 }
3824 
3825 static void commit_planes_for_stream_fast(struct dc *dc,
3826 		struct dc_surface_update *srf_updates,
3827 		int surface_count,
3828 		struct dc_stream_state *stream,
3829 		struct dc_stream_update *stream_update,
3830 		enum surface_update_type update_type,
3831 		struct dc_state *context)
3832 {
3833 	int i, j;
3834 	struct pipe_ctx *top_pipe_to_program = NULL;
3835 	struct dc_stream_status *stream_status = NULL;
3836 	bool should_offload_fams2_flip = false;
3837 	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3838 
3839 	if (should_lock_all_pipes)
3840 		determine_pipe_unlock_order(dc, context);
3841 
3842 	if (dc->debug.fams2_config.bits.enable &&
3843 			dc->debug.fams2_config.bits.enable_offload_flip &&
3844 			dc_state_is_fams2_in_use(dc, context)) {
3845 		/* if not offloading to HWFQ, offload to FAMS2 if needed */
3846 		should_offload_fams2_flip = true;
3847 		for (i = 0; i < surface_count; i++) {
3848 			if (srf_updates[i].surface &&
3849 					srf_updates[i].surface->update_flags.raw &&
3850 					!check_address_only_update(srf_updates[i].surface->update_flags)) {
3851 				/* more than address update, need to acquire FAMS2 lock */
3852 				should_offload_fams2_flip = false;
3853 				break;
3854 			}
3855 		}
3856 		if (stream_update) {
3857 			/* more than address update, need to acquire FAMS2 lock */
3858 			should_offload_fams2_flip = false;
3859 		}
3860 	}
3861 
3862 	dc_exit_ips_for_hw_access(dc);
3863 
3864 	dc_z10_restore(dc);
3865 
3866 	top_pipe_to_program = resource_get_otg_master_for_stream(
3867 			&context->res_ctx,
3868 			stream);
3869 
3870 	if (!top_pipe_to_program)
3871 		return;
3872 
3873 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3874 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3875 
3876 		if (pipe->stream && pipe->plane_state) {
3877 			if (!dc->debug.using_dml2)
3878 				set_p_state_switch_method(dc, context, pipe);
3879 
3880 			if (dc->debug.visual_confirm)
3881 				dc_update_visual_confirm_color(dc, context, pipe);
3882 		}
3883 	}
3884 
3885 	for (i = 0; i < surface_count; i++) {
3886 		struct dc_plane_state *plane_state = srf_updates[i].surface;
3887 		/*set logical flag for lock/unlock use*/
3888 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
3889 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3890 
3891 			if (!pipe_ctx->plane_state)
3892 				continue;
3893 			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3894 				continue;
3895 
3896 			pipe_ctx->plane_state->triplebuffer_flips = false;
3897 			if (update_type == UPDATE_TYPE_FAST &&
3898 					dc->hwss.program_triplebuffer != NULL &&
3899 					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3900 				/*triple buffer for VUpdate only*/
3901 				pipe_ctx->plane_state->triplebuffer_flips = true;
3902 			}
3903 		}
3904 	}
3905 
3906 	stream_status = dc_state_get_stream_status(context, stream);
3907 
3908 	if (should_offload_fams2_flip) {
3909 		commit_plane_for_stream_offload_fams2_flip(dc,
3910 				srf_updates,
3911 				surface_count,
3912 				stream,
3913 				context);
3914 	} else if (stream_status) {
3915 		build_dmub_cmd_list(dc,
3916 				srf_updates,
3917 				surface_count,
3918 				stream,
3919 				context,
3920 				context->dc_dmub_cmd,
3921 				&(context->dmub_cmd_count));
3922 		hwss_build_fast_sequence(dc,
3923 				context->dc_dmub_cmd,
3924 				context->dmub_cmd_count,
3925 				context->block_sequence,
3926 				&(context->block_sequence_steps),
3927 				top_pipe_to_program,
3928 				stream_status,
3929 				context);
3930 		hwss_execute_sequence(dc,
3931 				context->block_sequence,
3932 				context->block_sequence_steps);
3933 	}
3934 
3935 	/* Clear update flags so next flip doesn't have redundant programming
3936 	 * (if there's no stream update, the update flags are not cleared).
3937 	 * Surface updates are cleared unconditionally at the beginning of each flip,
3938 	 * so no need to clear here.
3939 	 */
3940 	if (top_pipe_to_program->stream)
3941 		top_pipe_to_program->stream->update_flags.raw = 0;
3942 }
3943 
3944 static void commit_planes_for_stream(struct dc *dc,
3945 		struct dc_surface_update *srf_updates,
3946 		int surface_count,
3947 		struct dc_stream_state *stream,
3948 		struct dc_stream_update *stream_update,
3949 		enum surface_update_type update_type,
3950 		struct dc_state *context)
3951 {
3952 	int i, j;
3953 	struct pipe_ctx *top_pipe_to_program = NULL;
3954 	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3955 	bool subvp_prev_use = false;
3956 	bool subvp_curr_use = false;
3957 	uint8_t current_stream_mask = 0;
3958 
3959 	if (should_lock_all_pipes)
3960 		determine_pipe_unlock_order(dc, context);
3961 	// Once we apply the new subvp context to hardware it won't be in the
3962 	// dc->current_state anymore, so we have to cache it before we apply
3963 	// the new SubVP context
3964 	subvp_prev_use = false;
3965 	dc_exit_ips_for_hw_access(dc);
3966 
3967 	dc_z10_restore(dc);
3968 	if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
3969 		hwss_process_outstanding_hw_updates(dc, dc->current_state);
3970 
3971 	if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
3972 		dc->res_pool->funcs->prepare_mcache_programming(dc, context);
3973 
3974 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3975 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3976 
3977 		if (pipe->stream && pipe->plane_state) {
3978 			if (!dc->debug.using_dml2)
3979 				set_p_state_switch_method(dc, context, pipe);
3980 
3981 			if (dc->debug.visual_confirm)
3982 				dc_update_visual_confirm_color(dc, context, pipe);
3983 		}
3984 	}
3985 
3986 	if (update_type == UPDATE_TYPE_FULL) {
3987 		dc_allow_idle_optimizations(dc, false);
3988 
3989 		if (get_seamless_boot_stream_count(context) == 0)
3990 			dc->hwss.prepare_bandwidth(dc, context);
3991 
3992 		if (dc->hwss.update_dsc_pg)
3993 			dc->hwss.update_dsc_pg(dc, context, false);
3994 
3995 		context_clock_trace(dc, context);
3996 	}
3997 
3998 	if (update_type == UPDATE_TYPE_FULL)
3999 		hwss_wait_for_outstanding_hw_updates(dc, dc->current_state);
4000 
4001 	top_pipe_to_program = resource_get_otg_master_for_stream(
4002 				&context->res_ctx,
4003 				stream);
4004 	ASSERT(top_pipe_to_program != NULL);
4005 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4006 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4007 
4008 		// Check old context for SubVP
4009 		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
4010 		if (subvp_prev_use)
4011 			break;
4012 	}
4013 
4014 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4015 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
4016 
4017 		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
4018 			subvp_curr_use = true;
4019 			break;
4020 		}
4021 	}
4022 
4023 	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
4024 		struct pipe_ctx *mpcc_pipe;
4025 		struct pipe_ctx *odm_pipe;
4026 
4027 		for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
4028 			for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
4029 				odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
4030 	}
4031 
4032 	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
4033 		if (top_pipe_to_program &&
4034 			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
4035 			if (should_use_dmub_lock(stream->link)) {
4036 				union dmub_hw_lock_flags hw_locks = { 0 };
4037 				struct dmub_hw_lock_inst_flags inst_flags = { 0 };
4038 
4039 				hw_locks.bits.lock_dig = 1;
4040 				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
4041 
4042 				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
4043 							true,
4044 							&hw_locks,
4045 							&inst_flags);
4046 			} else
4047 				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
4048 						top_pipe_to_program->stream_res.tg);
4049 		}
4050 
4051 	if (dc->hwss.wait_for_dcc_meta_propagation) {
4052 		dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
4053 	}
4054 
4055 	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4056 		if (dc->hwss.subvp_pipe_control_lock)
4057 			dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
4058 
4059 		if (dc->hwss.fams2_global_control_lock)
4060 			dc->hwss.fams2_global_control_lock(dc, context, true);
4061 
4062 		dc->hwss.interdependent_update_lock(dc, context, true);
4063 	} else {
4064 		if (dc->hwss.subvp_pipe_control_lock)
4065 			dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
4066 
4067 		if (dc->hwss.fams2_global_control_lock)
4068 			dc->hwss.fams2_global_control_lock(dc, context, true);
4069 
4070 		/* Lock the top pipe while updating plane addrs, since freesync requires
4071 		 *  plane addr update event triggers to be synchronized.
4072 		 *  top_pipe_to_program is expected to never be NULL
4073 		 */
4074 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
4075 	}
4076 
4077 	dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
4078 
4079 	// Stream updates
4080 	if (stream_update)
4081 		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
4082 
4083 	if (surface_count == 0) {
4084 		/*
4085 		 * In case of turning off the screen, there is no need to program the
4086 		 * front end a second time; just return after programming blank.
4087 		 */
4088 		if (dc->hwss.apply_ctx_for_surface)
4089 			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
4090 		if (dc->hwss.program_front_end_for_ctx)
4091 			dc->hwss.program_front_end_for_ctx(dc, context);
4092 
4093 		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4094 			dc->hwss.interdependent_update_lock(dc, context, false);
4095 		} else {
4096 			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
4097 		}
4098 		dc->hwss.post_unlock_program_front_end(dc, context);
4099 
4100 		if (update_type != UPDATE_TYPE_FAST)
4101 			if (dc->hwss.commit_subvp_config)
4102 				dc->hwss.commit_subvp_config(dc, context);
4103 
4104 		/* Since phantom pipe programming is moved to post_unlock_program_front_end,
4105 		 * move the SubVP lock to after the phantom pipes have been setup
4106 		 */
4107 		if (dc->hwss.subvp_pipe_control_lock)
4108 			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
4109 							 NULL, subvp_prev_use);
4110 
4111 		if (dc->hwss.fams2_global_control_lock)
4112 			dc->hwss.fams2_global_control_lock(dc, context, false);
4113 
4114 		return;
4115 	}
4116 
4117 	if (update_type != UPDATE_TYPE_FAST) {
4118 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
4119 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4120 
4121 			if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
4122 				dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
4123 				pipe_ctx->stream && pipe_ctx->plane_state) {
4124 				/* Only update visual confirm for SUBVP and Mclk switching here.
4125 				 * The bar appears on all pipes, so we need to update the bar on all displays,
4126 				 * so the information doesn't get stale.
4127 				 */
4128 				dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
4129 						pipe_ctx->plane_res.hubp->inst);
4130 			}
4131 		}
4132 	}
4133 
4134 	for (i = 0; i < surface_count; i++) {
4135 		struct dc_plane_state *plane_state = srf_updates[i].surface;
4136 
4137 		/*set logical flag for lock/unlock use*/
4138 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
4139 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4140 			if (!pipe_ctx->plane_state)
4141 				continue;
4142 			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
4143 				continue;
4144 			pipe_ctx->plane_state->triplebuffer_flips = false;
4145 			if (update_type == UPDATE_TYPE_FAST &&
4146 					dc->hwss.program_triplebuffer != NULL &&
4147 					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
4148 				/*triple buffer for VUpdate only*/
4149 				pipe_ctx->plane_state->triplebuffer_flips = true;
4150 			}
4151 		}
4152 		if (update_type == UPDATE_TYPE_FULL) {
4153 			/* force vsync flip when reconfiguring pipes to prevent underflow */
4154 			plane_state->flip_immediate = false;
4155 			plane_state->triplebuffer_flips = false;
4156 		}
4157 	}
4158 
4159 	// Update Type FULL, Surface updates
4160 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
4161 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4162 
4163 		if (!pipe_ctx->top_pipe &&
4164 			!pipe_ctx->prev_odm_pipe &&
4165 			should_update_pipe_for_stream(context, pipe_ctx, stream)) {
4166 			struct dc_stream_status *stream_status = NULL;
4167 
4168 			if (!pipe_ctx->plane_state)
4169 				continue;
4170 
4171 			/* Full fe update*/
4172 			if (update_type == UPDATE_TYPE_FAST)
4173 				continue;
4174 
4175 			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
4176 			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
4177 				/*turn off triple buffer for full update*/
4178 				dc->hwss.program_triplebuffer(
4179 					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
4180 			}
4181 			stream_status =
4182 				stream_get_status(context, pipe_ctx->stream);
4183 
4184 			if (dc->hwss.apply_ctx_for_surface && stream_status)
4185 				dc->hwss.apply_ctx_for_surface(
4186 					dc, pipe_ctx->stream, stream_status->plane_count, context);
4187 		}
4188 	}
4189 	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
4190 		dc->hwss.program_front_end_for_ctx(dc, context);
4191 		if (dc->debug.validate_dml_output) {
4192 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
4193 				struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
4194 				if (cur_pipe->stream == NULL)
4195 					continue;
4196 
4197 				cur_pipe->plane_res.hubp->funcs->validate_dml_output(
4198 						cur_pipe->plane_res.hubp, dc->ctx,
4199 						&context->res_ctx.pipe_ctx[i].rq_regs,
4200 						&context->res_ctx.pipe_ctx[i].dlg_regs,
4201 						&context->res_ctx.pipe_ctx[i].ttu_regs);
4202 			}
4203 		}
4204 	}
4205 
4206 	// Update Type FAST, Surface updates
4207 	if (update_type == UPDATE_TYPE_FAST) {
4208 		if (dc->hwss.set_flip_control_gsl)
4209 			for (i = 0; i < surface_count; i++) {
4210 				struct dc_plane_state *plane_state = srf_updates[i].surface;
4211 
4212 				for (j = 0; j < dc->res_pool->pipe_count; j++) {
4213 					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4214 
4215 					if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
4216 						continue;
4217 
4218 					if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
4219 						continue;
4220 
4221 					// GSL has to be used for flip immediate
4222 					dc->hwss.set_flip_control_gsl(pipe_ctx,
4223 							pipe_ctx->plane_state->flip_immediate);
4224 				}
4225 			}
4226 
4227 		/* Perform requested Updates */
4228 		for (i = 0; i < surface_count; i++) {
4229 			struct dc_plane_state *plane_state = srf_updates[i].surface;
4230 
4231 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
4232 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4233 
4234 				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
4235 					continue;
4236 
4237 				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
4238 					continue;
4239 
4240 				if (srf_updates[i].cm2_params &&
4241 						srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src ==
4242 								DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM &&
4243 						srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting ==
4244 								DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT &&
4245 						dc->hwss.trigger_3dlut_dma_load)
4246 					dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
4247 
4248 				/*program triple buffer after lock based on flip type*/
4249 				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
4250 					/*only enable triplebuffer for fast_update*/
4251 					dc->hwss.program_triplebuffer(
4252 						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
4253 				}
4254 				if (pipe_ctx->plane_state->update_flags.bits.addr_update)
4255 					dc->hwss.update_plane_addr(dc, pipe_ctx);
4256 			}
4257 		}
4258 	}
4259 
4260 	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4261 		dc->hwss.interdependent_update_lock(dc, context, false);
4262 	} else {
4263 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
4264 	}
4265 
4266 	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
4267 		if (top_pipe_to_program &&
4268 		    top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
4269 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
4270 				top_pipe_to_program->stream_res.tg,
4271 				CRTC_STATE_VACTIVE);
4272 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
4273 				top_pipe_to_program->stream_res.tg,
4274 				CRTC_STATE_VBLANK);
4275 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
4276 				top_pipe_to_program->stream_res.tg,
4277 				CRTC_STATE_VACTIVE);
4278 
4279 			if (should_use_dmub_lock(stream->link)) {
4280 				union dmub_hw_lock_flags hw_locks = { 0 };
4281 				struct dmub_hw_lock_inst_flags inst_flags = { 0 };
4282 
4283 				hw_locks.bits.lock_dig = 1;
4284 				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
4285 
4286 				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
4287 							false,
4288 							&hw_locks,
4289 							&inst_flags);
4290 			} else
4291 				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
4292 					top_pipe_to_program->stream_res.tg);
4293 		}
4294 
4295 	if (subvp_curr_use) {
4296 		/* If enabling subvp or transitioning from subvp->subvp, enable the
4297 		 * phantom streams before we program front end for the phantom pipes.
4298 		 */
4299 		if (update_type != UPDATE_TYPE_FAST) {
4300 			if (dc->hwss.enable_phantom_streams)
4301 				dc->hwss.enable_phantom_streams(dc, context);
4302 		}
4303 	}
4304 
4305 	if (update_type != UPDATE_TYPE_FAST)
4306 		dc->hwss.post_unlock_program_front_end(dc, context);
4307 
4308 	if (subvp_prev_use && !subvp_curr_use) {
4309 		/* If disabling subvp, disable phantom streams after front end
4310 		 * programming has completed (we turn on phantom OTG in order
4311 		 * to complete the plane disable for phantom pipes).
4312 		 */
4313 
4314 		if (dc->hwss.disable_phantom_streams)
4315 			dc->hwss.disable_phantom_streams(dc, context);
4316 	}
4317 
4318 	if (update_type != UPDATE_TYPE_FAST)
4319 		if (dc->hwss.commit_subvp_config)
4320 			dc->hwss.commit_subvp_config(dc, context);
4321 	/* Since phantom pipe programming is moved to post_unlock_program_front_end,
4322 	 * move the SubVP lock to after the phantom pipes have been setup
4323 	 */
4324 	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4325 		if (dc->hwss.subvp_pipe_control_lock)
4326 			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
4327 		if (dc->hwss.fams2_global_control_lock)
4328 			dc->hwss.fams2_global_control_lock(dc, context, false);
4329 	} else {
4330 		if (dc->hwss.subvp_pipe_control_lock)
4331 			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
4332 		if (dc->hwss.fams2_global_control_lock)
4333 			dc->hwss.fams2_global_control_lock(dc, context, false);
4334 	}
4335 
4336 	// Fire manual trigger only when bottom plane is flipped
4337 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
4338 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4339 
4340 		if (!pipe_ctx->plane_state)
4341 			continue;
4342 
4343 		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
4344 				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
4345 				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
4346 				pipe_ctx->plane_state->skip_manual_trigger)
4347 			continue;
4348 
4349 		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
4350 			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
4351 	}
4352 
4353 	current_stream_mask = get_stream_mask(dc, context);
4354 	if (current_stream_mask != context->stream_mask) {
4355 		context->stream_mask = current_stream_mask;
4356 		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
4357 	}
4358 }
4359 
4360 /**
4361  * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
4362  *
4363  * @dc: Used to get the current state status
4364  * @stream: Target stream from which we want to remove the attached planes
4365  * @srf_updates: Array of surface updates
4366  * @surface_count: Number of surface updates
4367  * @is_plane_addition: [in] Fill out with true if it is a plane addition case
4368  *
4369  * DCN32x and newer support a feature named Dynamic ODM which can conflict with
4370  * the MPO if used simultaneously in some specific configurations (e.g.,
4371  * 4k@144). This function checks if the incoming context requires applying a
4372  * transition state with unnecessary pipe splitting and ODM disabled to
4373  * circumvent our hardware limitations to prevent this edge case. If the OPP
4374  * associated with an MPCC might change due to plane additions, this function
4375  * returns true.
4376  *
4377  * Return:
4378  * Return true if OPP and MPCC might change, otherwise, return false.
4379  */
4380 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
4381 		struct dc_stream_state *stream,
4382 		struct dc_surface_update *srf_updates,
4383 		int surface_count,
4384 		bool *is_plane_addition)
4385 {
4386 
4387 	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4388 	bool force_minimal_pipe_splitting = false;
4389 	bool subvp_active = false;
4390 	uint32_t i;
4391 
4392 	*is_plane_addition = false;
4393 
4394 	if (cur_stream_status &&
4395 			dc->current_state->stream_count > 0 &&
4396 			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4397 		/* determine if minimal transition is required due to MPC*/
4398 		if (surface_count > 0) {
4399 			if (cur_stream_status->plane_count > surface_count) {
4400 				force_minimal_pipe_splitting = true;
4401 			} else if (cur_stream_status->plane_count < surface_count) {
4402 				force_minimal_pipe_splitting = true;
4403 				*is_plane_addition = true;
4404 			}
4405 		}
4406 	}
4407 
4408 	if (cur_stream_status &&
4409 			dc->current_state->stream_count == 1 &&
4410 			dc->debug.enable_single_display_2to1_odm_policy) {
4411 		/* determine if minimal transition is required due to dynamic ODM*/
4412 		if (surface_count > 0) {
4413 			if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4414 				force_minimal_pipe_splitting = true;
4415 			} else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4416 				force_minimal_pipe_splitting = true;
4417 				*is_plane_addition = true;
4418 			}
4419 		}
4420 	}
4421 
4422 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4423 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4424 
4425 		if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4426 			subvp_active = true;
4427 			break;
4428 		}
4429 	}
4430 
4431 	/* For SubVP when adding or removing planes we need to add a minimal transition
4432 	 * (even when disabling all planes). Whenever disabling a phantom pipe, we
4433 	 * must use the minimal transition path to disable the pipe correctly.
4434 	 *
4435 	 * We want to use the minimal transition whenever subvp is active, not only if
4436 	 * a plane is being added / removed from a subvp stream (MPO plane can be added
4437 	 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through
4438 	 * a min transition to disable subvp).
4439 	 */
4440 	if (cur_stream_status && subvp_active) {
4441 		/* determine if minimal transition is required due to SubVP*/
4442 		if (cur_stream_status->plane_count > surface_count) {
4443 			force_minimal_pipe_splitting = true;
4444 		} else if (cur_stream_status->plane_count < surface_count) {
4445 			force_minimal_pipe_splitting = true;
4446 			*is_plane_addition = true;
4447 		}
4448 	}
4449 
4450 	return force_minimal_pipe_splitting;
4451 }
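
/*
 * Concrete example (illustrative, not from the source): with the single
 * display 2:1 dynamic ODM policy enabled on one active stream that currently
 * has a single plane, an update carrying three surfaces gives
 * surface_count (3) > 2 and cur_stream_status->plane_count (1) < 3, so the
 * helper returns true with *is_plane_addition set and the caller inserts a
 * minimal transition (ODM and pipe split disabled) before applying the new
 * context.
 */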
4452 
4453 struct pipe_split_policy_backup {
4454 	bool dynamic_odm_policy;
4455 	bool subvp_policy;
4456 	enum pipe_split_policy mpc_policy;
4457 	char force_odm[MAX_PIPES];
4458 };
4459 
4460 static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
4461 		struct dc_state *context,
4462 		struct pipe_split_policy_backup *policy)
4463 {
4464 	int i;
4465 
4466 	if (!dc->config.is_vmin_only_asic) {
4467 		policy->mpc_policy = dc->debug.pipe_split_policy;
4468 		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4469 	}
4470 	policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4471 	dc->debug.enable_single_display_2to1_odm_policy = false;
4472 	policy->subvp_policy = dc->debug.force_disable_subvp;
4473 	dc->debug.force_disable_subvp = true;
4474 	for (i = 0; i < context->stream_count; i++) {
4475 		policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
4476 		if (context->streams[i]->debug.allow_transition_for_forced_odm)
4477 			context->streams[i]->debug.force_odm_combine_segments = 0;
4478 	}
4479 }
4480 
4481 static void restore_minimal_pipe_split_policy(struct dc *dc,
4482 		struct dc_state *context,
4483 		struct pipe_split_policy_backup *policy)
4484 {
4485 	uint8_t i;
4486 
4487 	if (!dc->config.is_vmin_only_asic)
4488 		dc->debug.pipe_split_policy = policy->mpc_policy;
4489 	dc->debug.enable_single_display_2to1_odm_policy =
4490 			policy->dynamic_odm_policy;
4491 	dc->debug.force_disable_subvp = policy->subvp_policy;
4492 	for (i = 0; i < context->stream_count; i++)
4493 		context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
4494 }
4495 
4496 static void release_minimal_transition_state(struct dc *dc,
4497 		struct dc_state *minimal_transition_context,
4498 		struct dc_state *base_context,
4499 		struct pipe_split_policy_backup *policy)
4500 {
4501 	restore_minimal_pipe_split_policy(dc, base_context, policy);
4502 	dc_state_release(minimal_transition_context);
4503 }
4504 
4505 static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
4506 {
4507 	uint8_t i;
4508 	int j;
4509 	struct dc_stream_status *stream_status;
4510 
4511 	for (i = 0; i < context->stream_count; i++) {
4512 		stream_status = &context->stream_status[i];
4513 
4514 		for (j = 0; j < stream_status->plane_count; j++)
4515 			stream_status->plane_states[j]->flip_immediate = false;
4516 	}
4517 }
4518 
4519 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4520 		struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4521 {
4522 	struct dc_state *minimal_transition_context = NULL;
4523 
4524 	minimal_transition_context = dc_state_create_copy(base_context);
4525 	if (!minimal_transition_context)
4526 		return NULL;
4527 
4528 	backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
4529 	/* commit minimal state */
4530 	if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4531 		/* prevent underflow and corruption when reconfiguring pipes */
4532 		force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
4533 	} else {
4534 		/*
4535 		 * This should never happen, minimal transition state should
4536 		 * always be validated first before adding pipe split features.
4537 		 */
4538 		release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
4539 		BREAK_TO_DEBUGGER();
4540 		minimal_transition_context = NULL;
4541 	}
4542 	return minimal_transition_context;
4543 }
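
/*
 * Illustrative usage sketch (not from the source): create and release always
 * pair up, and the policy backup must be restored against the same base
 * context that was passed at creation time.  The commit_minimal_transition_*
 * helpers below follow this pattern; 'base' here is a hypothetical name.
 *
 *	struct pipe_split_policy_backup policy;
 *	struct dc_state *mid = create_minimal_transition_state(dc, base, &policy);
 *
 *	if (mid) {
 *		... commit 'mid' as the intermediate step ...
 *		release_minimal_transition_state(dc, mid, base, &policy);
 *	}
 */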
4544 
4545 static bool is_pipe_topology_transition_seamless_with_intermediate_step(
4546 		struct dc *dc,
4547 		struct dc_state *initial_state,
4548 		struct dc_state *intermediate_state,
4549 		struct dc_state *final_state)
4550 {
4551 	return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
4552 			intermediate_state) &&
4553 			dc->hwss.is_pipe_topology_transition_seamless(dc,
4554 					intermediate_state, final_state);
4555 }
4556 
4557 static void swap_and_release_current_context(struct dc *dc,
4558 		struct dc_state *new_context, struct dc_stream_state *stream)
4559 {
4560 
4561 	int i;
4562 	struct dc_state *old = dc->current_state;
4563 	struct pipe_ctx *pipe_ctx;
4564 
4565 	/* Since memory free requires elevated IRQ, an interrupt
4566 	 * request is generated by mem free. If this happens
4567 	 * between freeing and reassigning the context, our vsync
4568 	 * interrupt will call into dc and cause a memory
4569 	 * corruption. Hence, we first reassign the context,
4570 	 * then free the old context.
4571 	 */
4572 	dc->current_state = new_context;
4573 	dc_state_release(old);
4574 
4575 	// clear any forced full updates
4576 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4577 		pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
4578 
4579 		if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4580 			pipe_ctx->plane_state->force_full_update = false;
4581 	}
4582 }
4583 
4584 static int initialize_empty_surface_updates(
4585 		struct dc_stream_state *stream,
4586 		struct dc_surface_update *srf_updates)
4587 {
4588 	struct dc_stream_status *status = dc_stream_get_status(stream);
4589 	int i;
4590 
4591 	if (!status)
4592 		return 0;
4593 
4594 	for (i = 0; i < status->plane_count; i++)
4595 		srf_updates[i].surface = status->plane_states[i];
4596 
4597 	return status->plane_count;
4598 }
4599 
4600 static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
4601 		struct dc_state *new_context,
4602 		struct dc_stream_state *stream,
4603 		struct dc_surface_update *srf_updates,
4604 		int surface_count)
4605 {
4606 	bool success = false;
4607 	struct pipe_split_policy_backup policy;
4608 	struct dc_state *intermediate_context =
4609 			create_minimal_transition_state(dc, new_context,
4610 					&policy);
4611 
4612 	if (intermediate_context) {
4613 		if (is_pipe_topology_transition_seamless_with_intermediate_step(
4614 				dc,
4615 				dc->current_state,
4616 				intermediate_context,
4617 				new_context)) {
4618 			DC_LOG_DC("commit minimal transition state: base = new state\n");
4619 			commit_planes_for_stream(dc, srf_updates,
4620 					surface_count, stream, NULL,
4621 					UPDATE_TYPE_FULL, intermediate_context);
4622 			swap_and_release_current_context(
4623 					dc, intermediate_context, stream);
4624 			dc_state_retain(dc->current_state);
4625 			success = true;
4626 		}
4627 		release_minimal_transition_state(
4628 				dc, intermediate_context, new_context, &policy);
4629 	}
4630 	return success;
4631 }
4632 
4633 static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
4634 		struct dc_state *new_context, struct dc_stream_state *stream)
4635 {
4636 	bool success = false;
4637 	struct pipe_split_policy_backup policy;
4638 	struct dc_state *intermediate_context;
4639 	struct dc_state *old_current_state = dc->current_state;
4640 	struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
4641 	int surface_count;
4642 
4643 	/*
4644 	 * Both current and new contexts share the same stream and plane state
4645 	 * pointers. When new context is validated, stream and planes get
4646 	 * populated with new updates such as new plane addresses. This makes
4647 	 * the current context no longer valid because stream and planes are
4648 	 * modified from the original. We backup current stream and plane states
4649 	 * into scratch space whenever we are populating new context. So we can
4650 	 * restore the original values back by calling the restore function now.
4651 	 * This restores back the original stream and plane states associated
4652 	 * with the current state.
4653 	 */
4654 	restore_planes_and_stream_state(&dc->scratch.current_state, stream);
4655 	dc_state_retain(old_current_state);
4656 	intermediate_context = create_minimal_transition_state(dc,
4657 			old_current_state, &policy);
4658 
4659 	if (intermediate_context) {
4660 		if (is_pipe_topology_transition_seamless_with_intermediate_step(
4661 				dc,
4662 				dc->current_state,
4663 				intermediate_context,
4664 				new_context)) {
4665 			DC_LOG_DC("commit minimal transition state: base = current state\n");
4666 			surface_count = initialize_empty_surface_updates(
4667 					stream, srf_updates);
4668 			commit_planes_for_stream(dc, srf_updates,
4669 					surface_count, stream, NULL,
4670 					UPDATE_TYPE_FULL, intermediate_context);
4671 			swap_and_release_current_context(
4672 					dc, intermediate_context, stream);
4673 			dc_state_retain(dc->current_state);
4674 			success = true;
4675 		}
4676 		release_minimal_transition_state(dc, intermediate_context,
4677 				old_current_state, &policy);
4678 	}
4679 	dc_state_release(old_current_state);
4680 	/*
4681 	 * Restore stream and plane states back to the values associated with
4682 	 * new context.
4683 	 */
4684 	restore_planes_and_stream_state(&dc->scratch.new_state, stream);
4685 	return success;
4686 }
4687 
4688 /**
4689  * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
4690  * on current or new context
4691  *
4692  * @dc: DC structure, used to get the current state
4693  * @new_context: New context
4694  * @stream: Stream getting the update for the flip
4695  * @srf_updates: Surface updates
4696  * @surface_count: Number of surfaces
4697  *
4698  * The function takes the current state and the new state and determines a
4699  * minimal transition state as the intermediate step which could make the
4700  * transition between current and new states seamless. If one is found, it
4701  * commits the minimal transition state, updates the current state to it and
4702  * returns true; otherwise it returns false.
4703  *
4704  * Return:
4705  * Return True if the minimal transition succeeded, false otherwise
4706  */
4707 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
4708 		struct dc_state *new_context,
4709 		struct dc_stream_state *stream,
4710 		struct dc_surface_update *srf_updates,
4711 		int surface_count)
4712 {
4713 	bool success = commit_minimal_transition_based_on_new_context(
4714 				dc, new_context, stream, srf_updates,
4715 				surface_count);
4716 	if (!success)
4717 		success = commit_minimal_transition_based_on_current_context(dc,
4718 				new_context, stream);
4719 	if (!success)
4720 		DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
4721 	return success;
4722 }
4723 
4724 /**
4725  * commit_minimal_transition_state - Create a transition pipe split state
4726  *
4727  * @dc: Used to get the current state status
4728  * @transition_base_context: New transition state
4729  *
4730  * In some specific configurations, such as pipe split on multi-display with
4731  * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4732  * programming when moving to new planes. To mitigate those types of problems,
4733  * this function adds a transition state that minimizes pipe usage before
4734  * programming the new configuration. When adding a new plane, the current
4735  * state requires the least pipes, so it is applied without splitting. When
4736  * removing a plane, the new state requires the least pipes, so it is applied
4737  * without splitting.
4738  *
4739  * Return:
4740  * Return false if something is wrong in the transition state.
4741  */
4742 static bool commit_minimal_transition_state(struct dc *dc,
4743 		struct dc_state *transition_base_context)
4744 {
4745 	struct dc_state *transition_context;
4746 	struct pipe_split_policy_backup policy;
4747 	enum dc_status ret = DC_ERROR_UNEXPECTED;
4748 	unsigned int i, j;
4749 	unsigned int pipe_in_use = 0;
4750 	bool subvp_in_use = false;
4751 	bool odm_in_use = false;
4752 
4753 	/* check current pipes in use*/
4754 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4755 		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4756 
4757 		if (pipe->plane_state)
4758 			pipe_in_use++;
4759 	}
4760 
4761 	/* If SubVP is enabled and we are adding or removing planes from any main subvp
4762 	 * pipe, we must use the minimal transition.
4763 	 */
4764 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4765 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4766 
4767 		if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4768 			subvp_in_use = true;
4769 			break;
4770 		}
4771 	}
4772 
4773 	/* If ODM is enabled and we are adding or removing planes from any ODM
4774 	 * pipe, we must use the minimal transition.
4775 	 */
4776 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4777 		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4778 
4779 		if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4780 			odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4781 			break;
4782 		}
4783 	}
4784 
4785 	/* When the OS adds a new surface while all pipes are already in use for ODM
4786 	 * combine or MPC split, commit_minimal_transition_state is needed to transition
4787 	 * safely. When the OS exits MPO and ODM/MPC split reclaim all pipes, it must be
4788 	 * called again. Otherwise return true to skip.
4789 	 *
4790 	 * This reduces the scenarios where dc_commit_state_no_check is used during flips,
4791 	 * especially when entering/exiting MPO while DCN still has enough resources.
4792 	 */
4793 	if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4794 		return true;
4795 
4796 	DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4797 			dc->current_state == transition_base_context ? "current" : "new",
4798 			subvp_in_use ? "Subvp In Use" :
4799 			odm_in_use ? "ODM in Use" :
4800 			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4801 			"Unknown");
4802 
4803 	dc_state_retain(transition_base_context);
4804 	transition_context = create_minimal_transition_state(dc,
4805 			transition_base_context, &policy);
4806 	if (transition_context) {
4807 		ret = dc_commit_state_no_check(dc, transition_context);
4808 		release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
4809 	}
4810 	dc_state_release(transition_base_context);
4811 
4812 	if (ret != DC_OK) {
4813 		/* this should never happen */
4814 		BREAK_TO_DEBUGGER();
4815 		return false;
4816 	}
4817 
4818 	/* force full surface update */
4819 	for (i = 0; i < dc->current_state->stream_count; i++) {
4820 		for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4821 			dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4822 		}
4823 	}
4824 
4825 	return true;
4826 }
4827 
4828 void populate_fast_updates(struct dc_fast_update *fast_update,
4829 		struct dc_surface_update *srf_updates,
4830 		int surface_count,
4831 		struct dc_stream_update *stream_update)
4832 {
4833 	int i = 0;
4834 
4835 	if (stream_update) {
4836 		fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4837 		fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4838 	} else {
4839 		fast_update[0].out_transfer_func = NULL;
4840 		fast_update[0].output_csc_transform = NULL;
4841 	}
4842 
4843 	for (i = 0; i < surface_count; i++) {
4844 		fast_update[i].flip_addr = srf_updates[i].flip_addr;
4845 		fast_update[i].gamma = srf_updates[i].gamma;
4846 		fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4847 		fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4848 		fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4849 		fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
4850 	}
4851 }
4852 
4853 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4854 {
4855 	int i;
4856 
4857 	if (fast_update[0].out_transfer_func ||
4858 		fast_update[0].output_csc_transform)
4859 		return true;
4860 
4861 	for (i = 0; i < surface_count; i++) {
4862 		if (fast_update[i].flip_addr ||
4863 				fast_update[i].gamma ||
4864 				fast_update[i].gamut_remap_matrix ||
4865 				fast_update[i].input_csc_color_matrix ||
4866 				fast_update[i].cursor_csc_color_matrix ||
4867 				fast_update[i].coeff_reduction_factor)
4868 			return true;
4869 	}
4870 
4871 	return false;
4872 }
4873 
4874 bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4875 {
4876 	int i;
4877 
4878 	if (fast_update[0].out_transfer_func ||
4879 		fast_update[0].output_csc_transform)
4880 		return true;
4881 
4882 	for (i = 0; i < surface_count; i++) {
4883 		if (fast_update[i].input_csc_color_matrix ||
4884 				fast_update[i].gamma ||
4885 				fast_update[i].gamut_remap_matrix ||
4886 				fast_update[i].coeff_reduction_factor ||
4887 				fast_update[i].cursor_csc_color_matrix)
4888 			return true;
4889 	}
4890 
4891 	return false;
4892 }
4893 
4894 static bool full_update_required(struct dc *dc,
4895 		struct dc_surface_update *srf_updates,
4896 		int surface_count,
4897 		struct dc_stream_update *stream_update,
4898 		struct dc_stream_state *stream)
4899 {
4900 
4901 	int i;
4902 	struct dc_stream_status *stream_status;
4903 	const struct dc_state *context = dc->current_state;
4904 
4905 	for (i = 0; i < surface_count; i++) {
4906 		if (srf_updates &&
4907 				(srf_updates[i].plane_info ||
4908 				srf_updates[i].scaling_info ||
4909 				(srf_updates[i].hdr_mult.value &&
4910 				srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
4911 				(srf_updates[i].sdr_white_level_nits &&
4912 				srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
4913 				srf_updates[i].in_transfer_func ||
4914 				srf_updates[i].func_shaper ||
4915 				srf_updates[i].lut3d_func ||
4916 				srf_updates[i].surface->force_full_update ||
4917 				(srf_updates[i].flip_addr &&
4918 				srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4919 				(srf_updates[i].cm2_params &&
4920 				 (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
4921 				  srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
4922 				!is_surface_in_context(context, srf_updates[i].surface)))
4923 			return true;
4924 	}
4925 
4926 	if (stream_update &&
4927 			(((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4928 			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4929 			stream_update->integer_scaling_update) ||
4930 			stream_update->hdr_static_metadata ||
4931 			stream_update->abm_level ||
4932 			stream_update->periodic_interrupt ||
4933 			stream_update->vrr_infopacket ||
4934 			stream_update->vsc_infopacket ||
4935 			stream_update->vsp_infopacket ||
4936 			stream_update->hfvsif_infopacket ||
4937 			stream_update->vtem_infopacket ||
4938 			stream_update->adaptive_sync_infopacket ||
4939 			stream_update->dpms_off ||
4940 			stream_update->allow_freesync ||
4941 			stream_update->vrr_active_variable ||
4942 			stream_update->vrr_active_fixed ||
4943 			stream_update->gamut_remap ||
4944 			stream_update->output_color_space ||
4945 			stream_update->dither_option ||
4946 			stream_update->wb_update ||
4947 			stream_update->dsc_config ||
4948 			stream_update->mst_bw_update ||
4949 			stream_update->func_shaper ||
4950 			stream_update->lut3d_func ||
4951 			stream_update->pending_test_pattern ||
4952 			stream_update->crtc_timing_adjust ||
4953 			stream_update->scaler_sharpener_update ||
4954 			stream_update->hw_cursor_req))
4955 		return true;
4956 
4957 	if (stream) {
4958 		stream_status = dc_stream_get_status(stream);
4959 		if (stream_status == NULL || stream_status->plane_count != surface_count)
4960 			return true;
4961 	}
4962 	if (dc->idle_optimizations_allowed)
4963 		return true;
4964 
4965 	return false;
4966 }
4967 
4968 static bool fast_update_only(struct dc *dc,
4969 		struct dc_fast_update *fast_update,
4970 		struct dc_surface_update *srf_updates,
4971 		int surface_count,
4972 		struct dc_stream_update *stream_update,
4973 		struct dc_stream_state *stream)
4974 {
4975 	return fast_updates_exist(fast_update, surface_count)
4976 			&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4977 }
4978 
4979 static bool update_planes_and_stream_v1(struct dc *dc,
4980 		struct dc_surface_update *srf_updates, int surface_count,
4981 		struct dc_stream_state *stream,
4982 		struct dc_stream_update *stream_update,
4983 		struct dc_state *state)
4984 {
4985 	const struct dc_stream_status *stream_status;
4986 	enum surface_update_type update_type;
4987 	struct dc_state *context;
4988 	struct dc_context *dc_ctx = dc->ctx;
4989 	int i, j;
4990 	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4991 
4992 	dc_exit_ips_for_hw_access(dc);
4993 
4994 	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4995 	stream_status = dc_stream_get_status(stream);
4996 	context = dc->current_state;
4997 
4998 	update_type = dc_check_update_surfaces_for_stream(
4999 				dc, srf_updates, surface_count, stream_update, stream_status);
5000 	/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
5001 	 * E.g. the desktop and an MPO plane are both flip_immediate but only the MPO plane received a flip.
5002 	 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
5003 	 */
5004 	force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
5005 
5006 	if (update_type >= UPDATE_TYPE_FULL) {
5007 
5008 		/* initialize scratch memory for building context */
5009 		context = dc_state_create_copy(state);
5010 		if (context == NULL) {
5011 			DC_ERROR("Failed to allocate new validate context!\n");
5012 			return false;
5013 		}
5014 
5015 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
5016 			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
5017 			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5018 
5019 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
5020 				new_pipe->plane_state->force_full_update = true;
5021 		}
5022 	} else if (update_type == UPDATE_TYPE_FAST) {
5023 		/*
5024 		 * Previous frame finished and HW is ready for optimization.
5025 		 */
5026 		dc_post_update_surfaces_to_stream(dc);
5027 	}
5028 
5029 	for (i = 0; i < surface_count; i++) {
5030 		struct dc_plane_state *surface = srf_updates[i].surface;
5031 
5032 		copy_surface_update_to_plane(surface, &srf_updates[i]);
5033 
5034 		if (update_type >= UPDATE_TYPE_MED) {
5035 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
5036 				struct pipe_ctx *pipe_ctx =
5037 					&context->res_ctx.pipe_ctx[j];
5038 
5039 				if (pipe_ctx->plane_state != surface)
5040 					continue;
5041 
5042 				resource_build_scaling_params(pipe_ctx);
5043 			}
5044 		}
5045 	}
5046 
5047 	copy_stream_update_to_stream(dc, context, stream, stream_update);
5048 
5049 	if (update_type >= UPDATE_TYPE_FULL) {
5050 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
5051 			DC_ERROR("Mode validation failed for stream update!\n");
5052 			dc_state_release(context);
5053 			return false;
5054 		}
5055 	}
5056 
5057 	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
5058 
5059 	if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
5060 			!dc->debug.enable_legacy_fast_update) {
5061 		commit_planes_for_stream_fast(dc,
5062 				srf_updates,
5063 				surface_count,
5064 				stream,
5065 				stream_update,
5066 				update_type,
5067 				context);
5068 	} else {
5069 		commit_planes_for_stream(
5070 				dc,
5071 				srf_updates,
5072 				surface_count,
5073 				stream,
5074 				stream_update,
5075 				update_type,
5076 				context);
5077 	}
5078 	/* update current_state */
5079 	if (dc->current_state != context) {
5080 
5081 		struct dc_state *old = dc->current_state;
5082 
5083 		dc->current_state = context;
5084 		dc_state_release(old);
5085 
5086 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
5087 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
5088 
5089 			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
5090 				pipe_ctx->plane_state->force_full_update = false;
5091 		}
5092 	}
5093 
5094 	/* Legacy optimization path for DCE. */
5095 	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
5096 		dc_post_update_surfaces_to_stream(dc);
5097 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
5098 	}
5099 	return true;
5100 }
5101 
5102 static bool update_planes_and_stream_v2(struct dc *dc,
5103 		struct dc_surface_update *srf_updates, int surface_count,
5104 		struct dc_stream_state *stream,
5105 		struct dc_stream_update *stream_update)
5106 {
5107 	struct dc_state *context;
5108 	enum surface_update_type update_type;
5109 	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
5110 
5111 	/* In cases where MPO and split or ODM are used, transitions can
5112 	 * cause underflow. Apply stream configuration with minimal pipe
5113 	 * split first to avoid unsupported transitions for active pipes.
5114 	 */
5115 	bool force_minimal_pipe_splitting = false;
5116 	bool is_plane_addition = false;
5117 	bool is_fast_update_only;
5118 
5119 	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
5120 	is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
5121 			surface_count, stream_update, stream);
5122 	force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
5123 			dc,
5124 			stream,
5125 			srf_updates,
5126 			surface_count,
5127 			&is_plane_addition);
5128 
5129 	/* on plane addition, minimal state is the current one */
5130 	if (force_minimal_pipe_splitting && is_plane_addition &&
5131 		!commit_minimal_transition_state(dc, dc->current_state))
5132 		return false;
5133 
5134 	if (!update_planes_and_stream_state(
5135 			dc,
5136 			srf_updates,
5137 			surface_count,
5138 			stream,
5139 			stream_update,
5140 			&update_type,
5141 			&context))
5142 		return false;
5143 
5144 	/* on plane removal, minimal state is the new one */
5145 	if (force_minimal_pipe_splitting && !is_plane_addition) {
5146 		if (!commit_minimal_transition_state(dc, context)) {
5147 			dc_state_release(context);
5148 			return false;
5149 		}
5150 		update_type = UPDATE_TYPE_FULL;
5151 	}
5152 
5153 	if (dc->hwss.is_pipe_topology_transition_seamless &&
5154 			!dc->hwss.is_pipe_topology_transition_seamless(
5155 					dc, dc->current_state, context))
5156 		commit_minimal_transition_state_in_dc_update(dc, context, stream,
5157 				srf_updates, surface_count);
5158 
5159 	if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
5160 		commit_planes_for_stream_fast(dc,
5161 				srf_updates,
5162 				surface_count,
5163 				stream,
5164 				stream_update,
5165 				update_type,
5166 				context);
5167 	} else {
5168 		if (!stream_update &&
5169 				dc->hwss.is_pipe_topology_transition_seamless &&
5170 				!dc->hwss.is_pipe_topology_transition_seamless(
5171 						dc, dc->current_state, context)) {
5172 			DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
5173 			BREAK_TO_DEBUGGER();
5174 		}
5175 		commit_planes_for_stream(
5176 				dc,
5177 				srf_updates,
5178 				surface_count,
5179 				stream,
5180 				stream_update,
5181 				update_type,
5182 				context);
5183 	}
5184 	if (dc->current_state != context)
5185 		swap_and_release_current_context(dc, context, stream);
5186 	return true;
5187 }
5188 
5189 static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
5190 		struct dc_surface_update *srf_updates, int surface_count,
5191 		struct dc_stream_state *stream,
5192 		struct dc_stream_update *stream_update,
5193 		enum surface_update_type update_type)
5194 {
5195 	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
5196 
5197 	ASSERT(update_type < UPDATE_TYPE_FULL);
5198 	populate_fast_updates(fast_update, srf_updates, surface_count,
5199 			stream_update);
5200 	if (fast_update_only(dc, fast_update, srf_updates, surface_count,
5201 			stream_update, stream) &&
5202 			!dc->debug.enable_legacy_fast_update)
5203 		commit_planes_for_stream_fast(dc,
5204 				srf_updates,
5205 				surface_count,
5206 				stream,
5207 				stream_update,
5208 				update_type,
5209 				dc->current_state);
5210 	else
5211 		commit_planes_for_stream(
5212 				dc,
5213 				srf_updates,
5214 				surface_count,
5215 				stream,
5216 				stream_update,
5217 				update_type,
5218 				dc->current_state);
5219 }
5220 
5221 static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
5222 		struct dc_surface_update *srf_updates, int surface_count,
5223 		struct dc_stream_state *stream,
5224 		struct dc_stream_update *stream_update,
5225 		enum surface_update_type update_type,
5226 		struct dc_state *new_context)
5227 {
5228 	ASSERT(update_type >= UPDATE_TYPE_FULL);
5229 	if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
5230 			dc->current_state, new_context))
5231 		/*
5232 		 * It is required by the feature design that all pipe topologies
5233 		 * using extra free pipes for power saving purposes such as
5234 		 * dynamic ODM or SubVp shall only be enabled when it can be
5235 		 * transitioned seamlessly to AND from its minimal transition
5236 		 * state. A minimal transition state is defined as the same dc
5237 		 * state but with all power saving features disabled. So it uses
5238 		 * the minimum pipe topology. When we can't seamlessly
5239 		 * transition from state A to state B, we will insert the
5240 		 * minimal transition state A' or B' in between so seamless
5241 		 * transition between A and B can be made possible.
5242 		 */
5243 		commit_minimal_transition_state_in_dc_update(dc, new_context,
5244 				stream, srf_updates, surface_count);
5245 
5246 	commit_planes_for_stream(
5247 			dc,
5248 			srf_updates,
5249 			surface_count,
5250 			stream,
5251 			stream_update,
5252 			update_type,
5253 			new_context);
5254 }
5255 
5256 static bool update_planes_and_stream_v3(struct dc *dc,
5257 		struct dc_surface_update *srf_updates, int surface_count,
5258 		struct dc_stream_state *stream,
5259 		struct dc_stream_update *stream_update)
5260 {
5261 	struct dc_state *new_context;
5262 	enum surface_update_type update_type;
5263 
5264 	/*
5265 	 * When this function returns true and new_context is not equal to
5266 	 * the current state, the function allocates and validates a new dc
5267 	 * state and assigns it to new_context. The caller is responsible for
5268 	 * freeing this memory when new_context is no longer used. We swap the
5269 	 * current context with the new one and free the current one instead,
5270 	 * so new_context's memory lives until the next full update replaces it
5271 	 * with a newer context. Refer to the use of
5272 	 * swap_and_release_current_context below.
5273 	 */
5274 	if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
5275 				stream, stream_update, &update_type,
5276 				&new_context))
5277 		return false;
5278 
5279 	if (new_context == dc->current_state) {
5280 		commit_planes_and_stream_update_on_current_context(dc,
5281 				srf_updates, surface_count, stream,
5282 				stream_update, update_type);
5283 	} else {
5284 		commit_planes_and_stream_update_with_new_context(dc,
5285 				srf_updates, surface_count, stream,
5286 				stream_update, update_type, new_context);
5287 		swap_and_release_current_context(dc, new_context, stream);
5288 	}
5289 
5290 	return true;
5291 }
5292 
5293 static void clear_update_flags(struct dc_surface_update *srf_updates,
5294 	int surface_count, struct dc_stream_state *stream)
5295 {
5296 	int i;
5297 
5298 	if (stream)
5299 		stream->update_flags.raw = 0;
5300 
5301 	for (i = 0; i < surface_count; i++)
5302 		if (srf_updates[i].surface)
5303 			srf_updates[i].surface->update_flags.raw = 0;
5304 }
5305 
5306 bool dc_update_planes_and_stream(struct dc *dc,
5307 		struct dc_surface_update *srf_updates, int surface_count,
5308 		struct dc_stream_state *stream,
5309 		struct dc_stream_update *stream_update)
5310 {
5311 	bool ret = false;
5312 
5313 	dc_exit_ips_for_hw_access(dc);
5314 	/*
5315 	 * Update planes and stream version 3 separates FULL and FAST updates
5316 	 * into their own sequences. It aims to clean up frequent checks for
5317 	 * update type, which result in unnecessary branching in the logic flow.
5318 	 * It also adds a new commit minimal transition sequence, which detects
5319 	 * the need for a minimal transition based on an actual comparison of
5320 	 * the current and new states instead of "predicting" it from per-feature
5321 	 * software policy, i.e. could_mpcc_tree_change_for_active_pipes. The new
5322 	 * commit minimal transition sequence is made universal to any power
5323 	 * saving feature that uses extra free pipes, such as Dynamic ODM/MPC
5324 	 * Combine, MPO or SubVP. Therefore there is no longer a need to
5325 	 * specially handle compatibility problems with transitions among those
5326 	 * features as they are now transparent to the new sequence.
5327 	 */
5328 	if (dc->ctx->dce_version >= DCN_VERSION_4_01)
5329 		ret = update_planes_and_stream_v3(dc, srf_updates,
5330 				surface_count, stream, stream_update);
5331 	else
5332 		ret = update_planes_and_stream_v2(dc, srf_updates,
5333 			surface_count, stream, stream_update);
5334 
5335 	if (ret)
5336 		clear_update_flags(srf_updates, surface_count, stream);
5337 
5338 	return ret;
5339 }
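
/*
 * Illustrative sketch (not part of the driver): a typical DM-side page flip
 * through dc_update_planes_and_stream() only fills in the fields that changed.
 * The names plane_state, stream and addr below are hypothetical and stand for
 * objects the DM already owns.
 *
 *	struct dc_surface_update srf_update = {0};
 *
 *	srf_update.surface = plane_state;
 *	srf_update.flip_addr = &addr;	// only the surface address changed
 *
 *	if (!dc_update_planes_and_stream(dc, &srf_update, 1, stream, NULL))
 *		;	// DM-specific error handling for a rejected update
 *
 * Leaving plane_info, scaling_info, etc. NULL keeps the update on the fast
 * path when possible; populating them makes full_update_required() return
 * true and forces a full update.
 */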
5340 
5341 void dc_commit_updates_for_stream(struct dc *dc,
5342 		struct dc_surface_update *srf_updates,
5343 		int surface_count,
5344 		struct dc_stream_state *stream,
5345 		struct dc_stream_update *stream_update,
5346 		struct dc_state *state)
5347 {
5348 	bool ret = false;
5349 
5350 	dc_exit_ips_for_hw_access(dc);
5351 	/* TODO: Since changing the commit sequence can have a huge impact,
5352 	 * we decided to only enable it for DCN3x. However, as soon as
5353 	 * we get more confident about this change we'll need to enable
5354 	 * the new sequence for all ASICs.
5355 	 */
5356 	if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
5357 		ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
5358 				stream, stream_update);
5359 	} else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
5360 		ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
5361 				stream, stream_update);
5362 	} else
5363 		ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
5364 				stream_update, state);
5365 
5366 	if (ret)
5367 		clear_update_flags(srf_updates, surface_count, stream);
5368 }
5369 
5370 uint8_t dc_get_current_stream_count(struct dc *dc)
5371 {
5372 	return dc->current_state->stream_count;
5373 }
5374 
5375 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
5376 {
5377 	if (i < dc->current_state->stream_count)
5378 		return dc->current_state->streams[i];
5379 	return NULL;
5380 }
5381 
5382 enum dc_irq_source dc_interrupt_to_irq_source(
5383 		struct dc *dc,
5384 		uint32_t src_id,
5385 		uint32_t ext_id)
5386 {
5387 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
5388 }
5389 
5390 /*
5391  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
5392  */
5393 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
5394 {
5395 
5396 	if (dc == NULL)
5397 		return false;
5398 
5399 	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
5400 }
5401 
5402 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
5403 {
5404 	dal_irq_service_ack(dc->res_pool->irqs, src);
5405 }
5406 
5407 void dc_power_down_on_boot(struct dc *dc)
5408 {
5409 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
5410 	    dc->hwss.power_down_on_boot) {
5411 		if (dc->caps.ips_support)
5412 			dc_exit_ips_for_hw_access(dc);
5413 		dc->hwss.power_down_on_boot(dc);
5414 	}
5415 }
5416 
5417 void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
5418 {
5419 	if (!dc->current_state)
5420 		return;
5421 
5422 	switch (power_state) {
5423 	case DC_ACPI_CM_POWER_STATE_D0:
5424 		dc_state_construct(dc, dc->current_state);
5425 
5426 		dc_exit_ips_for_hw_access(dc);
5427 
5428 		dc_z10_restore(dc);
5429 
5430 		dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
5431 
5432 		dc->hwss.init_hw(dc);
5433 
5434 		if (dc->hwss.init_sys_ctx != NULL &&
5435 			dc->vm_pa_config.valid) {
5436 			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
5437 		}
5438 		break;
5439 	default:
5440 		ASSERT(dc->current_state->stream_count == 0);
5441 		dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
5442 
5443 		dc_state_destruct(dc->current_state);
5444 
5445 		break;
5446 	}
5447 }
5448 
5449 void dc_resume(struct dc *dc)
5450 {
5451 	uint32_t i;
5452 
5453 	for (i = 0; i < dc->link_count; i++)
5454 		dc->link_srv->resume(dc->links[i]);
5455 }
5456 
5457 bool dc_is_dmcu_initialized(struct dc *dc)
5458 {
5459 	struct dmcu *dmcu = dc->res_pool->dmcu;
5460 
5461 	if (dmcu)
5462 		return dmcu->funcs->is_dmcu_initialized(dmcu);
5463 	return false;
5464 }
5465 
5466 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
5467 {
5468 	if (dc->hwss.set_clock)
5469 		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
5470 	return DC_ERROR_UNEXPECTED;
5471 }
5472 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
5473 {
5474 	if (dc->hwss.get_clock)
5475 		dc->hwss.get_clock(dc, clock_type, clock_cfg);
5476 }
5477 
5478 /* enable/disable eDP PSR without specifying a stream for eDP */
5479 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
5480 {
5481 	int i;
5482 	bool allow_active;
5483 
5484 	for (i = 0; i < dc->current_state->stream_count ; i++) {
5485 		struct dc_link *link;
5486 		struct dc_stream_state *stream = dc->current_state->streams[i];
5487 
5488 		link = stream->link;
5489 		if (!link)
5490 			continue;
5491 
5492 		if (link->psr_settings.psr_feature_enabled) {
5493 			if (enable && !link->psr_settings.psr_allow_active) {
5494 				allow_active = true;
5495 				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
5496 					return false;
5497 			} else if (!enable && link->psr_settings.psr_allow_active) {
5498 				allow_active = false;
5499 				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
5500 					return false;
5501 			}
5502 		}
5503 	}
5504 
5505 	return true;
5506 }
5507 
5508 /* enable/disable eDP Replay without specifying a stream for eDP */
5509 bool dc_set_replay_allow_active(struct dc *dc, bool active)
5510 {
5511 	int i;
5512 	bool allow_active;
5513 
5514 	for (i = 0; i < dc->current_state->stream_count; i++) {
5515 		struct dc_link *link;
5516 		struct dc_stream_state *stream = dc->current_state->streams[i];
5517 
5518 		link = stream->link;
5519 		if (!link)
5520 			continue;
5521 
5522 		if (link->replay_settings.replay_feature_enabled) {
5523 			if (active && !link->replay_settings.replay_allow_active) {
5524 				allow_active = true;
5525 				if (!dc_link_set_replay_allow_active(link, &allow_active,
5526 					false, false, NULL))
5527 					return false;
5528 			} else if (!active && link->replay_settings.replay_allow_active) {
5529 				allow_active = false;
5530 				if (!dc_link_set_replay_allow_active(link, &allow_active,
5531 					true, false, NULL))
5532 					return false;
5533 			}
5534 		}
5535 	}
5536 
5537 	return true;
5538 }
5539 
5540 /* set IPS disable state */
5541 bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
5542 {
5543 	dc_exit_ips_for_hw_access(dc);
5544 
5545 	dc->config.disable_ips = disable_ips;
5546 
5547 	return true;
5548 }
5549 
5550 void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
5551 {
5552 	int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0;
5553 	enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0};
5554 	struct pipe_ctx *pipe = NULL;
5555 	struct dc_state *context = dc->current_state;
5556 
5557 	if (dc->debug.disable_idle_power_optimizations) {
5558 		DC_LOG_DEBUG("%s: disabled\n", __func__);
5559 		return;
5560 	}
5561 
5562 	if (allow != dc->idle_optimizations_allowed)
5563 		DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
5564 			   dc->idle_optimizations_allowed, allow, caller_name);
5565 
5566 	if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
5567 		return;
5568 
5569 	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
5570 		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
5571 			return;
5572 
5573 	if (allow == dc->idle_optimizations_allowed)
5574 		return;
5575 
5576 	if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
5577 	    dc->hwss.apply_idle_power_optimizations(dc, allow)) {
5578 		dc->idle_optimizations_allowed = allow;
5579 		DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled" : "disabled");
5580 	}
5581 
5582 	// log idle clocks and sub vp pipe types at idle optimization time
5583 	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk)
5584 		idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr);
5585 
5586 	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
5587 		idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);
5588 
5589 	if (dc->res_pool && context) {
5590 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
5591 			pipe = &context->res_ctx.pipe_ctx[i];
5592 			subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
5593 		}
5594 	}
5595 
5596 	DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
5597 			__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
5598 			subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
5599 
5600 }
5601 
5602 void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
5603 {
5604 	if (dc->caps.ips_support)
5605 		dc_allow_idle_optimizations_internal(dc, false, caller_name);
5606 }
5607 
5608 bool dc_dmub_is_ips_idle_state(struct dc *dc)
5609 {
5610 	if (dc->debug.disable_idle_power_optimizations)
5611 		return false;
5612 
5613 	if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
5614 		return false;
5615 
5616 	if (!dc->ctx->dmub_srv)
5617 		return false;
5618 
5619 	return dc->ctx->dmub_srv->idle_allowed;
5620 }
5621 
5622 /* set min and max memory clock to lowest and highest DPM level, respectively */
5623 void dc_unlock_memory_clock_frequency(struct dc *dc)
5624 {
5625 	if (dc->clk_mgr->funcs->set_hard_min_memclk)
5626 		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
5627 
5628 	if (dc->clk_mgr->funcs->set_hard_max_memclk)
5629 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
5630 }
5631 
5632 /* set min memory clock to the min required for current mode, max to maxDPM */
5633 void dc_lock_memory_clock_frequency(struct dc *dc)
5634 {
5635 	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
5636 		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
5637 
5638 	if (dc->clk_mgr->funcs->set_hard_min_memclk)
5639 		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
5640 
5641 	if (dc->clk_mgr->funcs->set_hard_max_memclk)
5642 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
5643 }
5644 
5645 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
5646 {
5647 	struct dc_state *context = dc->current_state;
5648 	struct hubp *hubp;
5649 	struct pipe_ctx *pipe;
5650 	int i;
5651 
5652 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
5653 		pipe = &context->res_ctx.pipe_ctx[i];
5654 
5655 		if (pipe->stream != NULL) {
5656 			dc->hwss.disable_pixel_data(dc, pipe, true);
5657 
5658 			// wait for double buffer
5659 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
5660 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
5661 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
5662 
5663 			hubp = pipe->plane_res.hubp;
5664 			hubp->funcs->set_blank_regs(hubp, true);
5665 		}
5666 	}
5667 	if (dc->clk_mgr->funcs->set_max_memclk)
5668 		dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
5669 	if (dc->clk_mgr->funcs->set_min_memclk)
5670 		dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
5671 
5672 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
5673 		pipe = &context->res_ctx.pipe_ctx[i];
5674 
5675 		if (pipe->stream != NULL) {
5676 			dc->hwss.disable_pixel_data(dc, pipe, false);
5677 
5678 			hubp = pipe->plane_res.hubp;
5679 			hubp->funcs->set_blank_regs(hubp, false);
5680 		}
5681 	}
5682 }
5683 
5684 
5685 /**
5686  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
5687  * @dc: pointer to dc of the dm calling this
5688  * @enable: True = transition to DC mode, false = transition back to AC mode
5689  *
5690  * Some SoCs define additional clock limits when in DC mode; DM should
5691  * invoke this function when the platform undergoes a power source transition
5692  * so DC can apply/unapply the limit. This interface may be disruptive to
5693  * the onscreen content.
5694  *
5695  * Context: Triggered by OS through DM interface, or manually by escape calls.
5696  * Need to hold a dclock when doing so.
5697  *
5698  * Return: none (void function)
5699  *
5700  */
5701 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
5702 {
5703 	unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
5704 	bool p_state_change_support;
5705 
5706 	if (!dc->config.dc_mode_clk_limit_support)
5707 		return;
5708 
5709 	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
5710 	for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
5711 		if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
5712 			maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
5713 	}
5714 	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
5715 	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
5716 
5717 	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
5718 		if (p_state_change_support) {
5719 			if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
5720 				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
5721 			// else: No-Op
5722 		} else {
5723 			if (funcMin <= softMax)
5724 				blank_and_force_memclk(dc, true, softMax);
5725 			// else: No-Op
5726 		}
5727 	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
5728 		if (p_state_change_support) {
5729 			if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
5730 				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
5731 			// else: No-Op
5732 		} else {
5733 			if (funcMin <= softMax)
5734 				blank_and_force_memclk(dc, true, maxDPM);
5735 			// else: No-Op
5736 		}
5737 	}
5738 	dc->clk_mgr->dc_mode_softmax_enabled = enable;
5739 }
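
/*
 * Illustrative sketch (not part of the driver): the DM is expected to call
 * dc_enable_dcmode_clk_limit() from its power-source notification path. The
 * handler name and the in_dc_mode flag below are hypothetical.
 *
 *	static void dm_power_source_changed(struct dc *dc, bool in_dc_mode)
 *	{
 *		// apply the DC-mode soft max on battery, lift it again on AC
 *		dc_enable_dcmode_clk_limit(dc, in_dc_mode);
 *	}
 *
 * Note the call may blank pipes temporarily (see blank_and_force_memclk()
 * above) and, as documented, may be disruptive to onscreen content.
 */
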
5740 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
5741 		unsigned int pitch,
5742 		unsigned int height,
5743 		enum surface_pixel_format format,
5744 		struct dc_cursor_attributes *cursor_attr)
5745 {
5746 	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
5747 		return true;
5748 	return false;
5749 }
5750 
5751 /* cleanup on driver unload */
5752 void dc_hardware_release(struct dc *dc)
5753 {
5754 	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
5755 
5756 	if (dc->hwss.hardware_release)
5757 		dc->hwss.hardware_release(dc);
5758 }
5759 
5760 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5761 {
5762 	if (dc->current_state)
5763 		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5764 }
5765 
5766 /**
5767  * dc_is_dmub_outbox_supported - Check if the DMUB firmware supports outbox notifications
5768  *
5769  * @dc: [in] dc structure
5770  *
5771  * Checks whether DMUB FW supports outbox notifications. If supported, DM
5772  * should register the outbox interrupt prior to actually enabling interrupts
5773  * via dc_enable_dmub_outbox.
5774  *
5775  * Return:
5776  * True if DMUB FW supports outbox notifications, False otherwise
5777  */
5778 bool dc_is_dmub_outbox_supported(struct dc *dc)
5779 {
5780 	if (!dc->caps.dmcub_support)
5781 		return false;
5782 
5783 	switch (dc->ctx->asic_id.chip_family) {
5784 
5785 	case FAMILY_YELLOW_CARP:
5786 		/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5787 		if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5788 		    !dc->debug.dpia_debug.bits.disable_dpia)
5789 			return true;
5790 	break;
5791 
5792 	case AMDGPU_FAMILY_GC_11_0_1:
5793 	case AMDGPU_FAMILY_GC_11_5_0:
5794 		if (!dc->debug.dpia_debug.bits.disable_dpia)
5795 			return true;
5796 	break;
5797 
5798 	default:
5799 		break;
5800 	}
5801 
5802 	/* dmub aux needs dmub notifications to be enabled */
5803 	return dc->debug.enable_dmub_aux_for_legacy_ddc;
5804 
5805 }
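
/*
 * Illustrative sketch (not part of the driver): the ordering the comment above
 * asks for is check support, register the DM's outbox interrupt handler, then
 * enable the outbox. register_outbox_irq() and adev are hypothetical DM-side
 * names used only for the example.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		register_outbox_irq(adev);	// DM-specific IRQ registration
 *		dc_enable_dmub_outbox(dc);
 *	}
 */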
5806 
5807 /**
5808  * dc_enable_dmub_notifications - Check if dmub fw supports outbox
5809  *
5810  * @dc: [in] dc structure
5811  *
5812  * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5813  * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
5814  * API shall be removed after switching.
5815  *
5816  * Return:
5817  * True if DMUB FW supports outbox notifications, False otherwise
5818  */
5819 bool dc_enable_dmub_notifications(struct dc *dc)
5820 {
5821 	return dc_is_dmub_outbox_supported(dc);
5822 }
5823 
5824 /**
5825  * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5826  *
5827  * @dc: [in] dc structure
5828  *
5829  * Enables DMUB unsolicited notifications to x86 via outbox.
5830  */
5831 void dc_enable_dmub_outbox(struct dc *dc)
5832 {
5833 	struct dc_context *dc_ctx = dc->ctx;
5834 
5835 	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5836 	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5837 }
5838 
5839 /**
5840  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5841  *                                      Sets port index appropriately for legacy DDC
5842  * @dc: dc structure
5843  * @link_index: link index
5844  * @payload: aux payload
5845  *
5846  * Returns: True if successful, False if failure
5847  */
5848 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5849 				uint32_t link_index,
5850 				struct aux_payload *payload)
5851 {
5852 	uint8_t action;
5853 	union dmub_rb_cmd cmd = {0};
5854 
5855 	ASSERT(payload->length <= 16);
5856 
5857 	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5858 	cmd.dp_aux_access.header.payload_bytes = 0;
5859 	/* For dpia, ddc_pin is set to NULL */
5860 	if (!dc->links[link_index]->ddc->ddc_pin)
5861 		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5862 	else
5863 		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5864 
5865 	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5866 	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5867 	cmd.dp_aux_access.aux_control.timeout = 0;
5868 	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5869 	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5870 	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5871 
5872 	/* set aux action */
5873 	if (payload->i2c_over_aux) {
5874 		if (payload->write) {
5875 			if (payload->mot)
5876 				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5877 			else
5878 				action = DP_AUX_REQ_ACTION_I2C_WRITE;
5879 		} else {
5880 			if (payload->mot)
5881 				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5882 			else
5883 				action = DP_AUX_REQ_ACTION_I2C_READ;
5884 		}
5885 	} else {
5886 		if (payload->write)
5887 			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5888 		else
5889 			action = DP_AUX_REQ_ACTION_DPCD_READ;
5890 	}
5891 
5892 	cmd.dp_aux_access.aux_control.dpaux.action = action;
5893 
5894 	if (payload->length && payload->write) {
5895 		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5896 			payload->data,
5897 			payload->length
5898 			);
5899 	}
5900 
5901 	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5902 
5903 	return true;
5904 }
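
/*
 * Illustrative sketch (not part of the driver): a 1-byte native DPCD read
 * submitted through dc_process_dmub_aux_transfer_async(). Since the call is
 * asynchronous, the reply data arrives later via an outbox notification. The
 * field names come from struct aux_payload as used above; the address and
 * link_index values are only an example.
 *
 *	struct aux_payload payload = {0};
 *
 *	payload.address = 0x600;	// e.g. DP_SET_POWER
 *	payload.length = 1;
 *	payload.write = false;
 *	payload.i2c_over_aux = false;	// native AUX, not I2C-over-AUX
 *
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */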
5905 
5906 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5907 					    uint8_t dpia_port_index)
5908 {
5909 	uint8_t index, link_index = 0xFF;
5910 
5911 	for (index = 0; index < dc->link_count; index++) {
5912 		/* ddc_hw_inst has dpia port index for dpia links
5913 		 * and ddc instance for legacy links
5914 		 */
5915 		if (!dc->links[index]->ddc->ddc_pin) {
5916 			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5917 				link_index = index;
5918 				break;
5919 			}
5920 		}
5921 	}
5922 	ASSERT(link_index != 0xFF);
5923 	return link_index;
5924 }
5925 
5926 /**
5927  * dc_process_dmub_set_config_async - Submits set_config command
5928  *
5929  * @dc: [in] dc structure
5930  * @link_index: [in] link_index: link index
5931  * @payload: [in] aux payload
5932  * @notify: [out] set_config immediate reply
5933  *
5934  * Submits set_config command to dmub via inbox message.
5935  *
5936  * Return:
5937  * True if successful, False if failure
5938  */
5939 bool dc_process_dmub_set_config_async(struct dc *dc,
5940 				uint32_t link_index,
5941 				struct set_config_cmd_payload *payload,
5942 				struct dmub_notification *notify)
5943 {
5944 	union dmub_rb_cmd cmd = {0};
5945 	bool is_cmd_complete = true;
5946 
5947 	/* prepare SET_CONFIG command */
5948 	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5949 	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5950 
5951 	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5952 	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5953 	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5954 
5955 	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5956 		/* command is not processed by dmub */
5957 		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5958 		return is_cmd_complete;
5959 	}
5960 
5961 	/* command processed by dmub, if ret_status is 1, it is completed instantly */
5962 	if (cmd.set_config_access.header.ret_status == 1)
5963 		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5964 	else
5965 		/* cmd pending, will receive notification via outbox */
5966 		is_cmd_complete = false;
5967 
5968 	return is_cmd_complete;
5969 }
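
/*
 * Illustrative sketch (not part of the driver): handling the two possible
 * outcomes documented above. The payload is assumed to be a prepared
 * struct set_config_cmd_payload owned by the caller.
 *
 *	struct dmub_notification notify = {0};
 *
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify)) {
 *		// completed now; notify.sc_status holds the immediate result
 *	} else {
 *		// still pending; final status arrives via an outbox notification
 *	}
 */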
5970 
5971 /**
5972  * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5973  *
5974  * @dc: [in] dc structure
5975  * @link_index: [in] link index
5976  * @mst_alloc_slots: [in] mst slots to be allotted
5977  * @mst_slots_in_use: [out] mst slots in use returned in failure case
5978  *
5979  * Submits mst slot allocation command to dmub via inbox message
5980  *
5981  * Return:
5982  * DC_OK if successful, DC_ERROR if failure
5983  */
5984 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5985 				uint32_t link_index,
5986 				uint8_t mst_alloc_slots,
5987 				uint8_t *mst_slots_in_use)
5988 {
5989 	union dmub_rb_cmd cmd = {0};
5990 
5991 	/* prepare MST_ALLOC_SLOTS command */
5992 	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5993 	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5994 
5995 	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5996 	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5997 
5998 	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5999 		/* command is not processed by dmub */
6000 		return DC_ERROR_UNEXPECTED;
6001 
6002 	/* command reached dmub; check whether it completed successfully */
6003 	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
6004 		/* command processing error */
6005 		return DC_ERROR_UNEXPECTED;
6006 
6007 	/* immediate status 2: MST is not enabled on this DPIA */
6008 	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
6009 		return DC_FAIL_UNSUPPORTED_1;
6010 
6011 	/* previously configured mst alloc and used slots did not match */
6012 	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
6013 		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
6014 		return DC_NOT_SUPPORTED;
6015 	}
6016 
6017 	return DC_OK;
6018 }
6019 
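/*
 * Illustrative usage sketch, not part of the driver: one possible way a DM
 * could react to the status codes returned by dc_process_dmub_set_mst_slots().
 * The helper name and the retry policy are assumptions for this example only.
 */
static inline enum dc_status example_request_mst_slots(const struct dc *dc,
							uint32_t link_index,
							uint8_t wanted_slots)
{
	uint8_t slots_in_use = 0;
	enum dc_status status;

	status = dc_process_dmub_set_mst_slots(dc, link_index, wanted_slots,
					       &slots_in_use);

	/* MST is not enabled on this DPIA: nothing more to try */
	if (status == DC_FAIL_UNSUPPORTED_1)
		return status;

	/* the previous allocation is still active: retry with the slot count
	 * the firmware reports as currently in use */
	if (status == DC_NOT_SUPPORTED)
		return dc_process_dmub_set_mst_slots(dc, link_index,
						     slots_in_use,
						     &slots_in_use);

	return status;
}
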
6020 /**
6021  * dc_process_dmub_dpia_set_tps_notification - Submits tps notification
6022  *
6023  * @dc: [in] dc structure
6024  * @link_index: [in] link index
6025  * @tps: [in] requested training pattern sequence (TPS)
6026  *
6027  * Submits set_tps_notification command to dmub via inbox message
6028  */
6029 void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps)
6030 {
6031 	union dmub_rb_cmd cmd = {0};
6032 
6033 	cmd.set_tps_notification.header.type = DMUB_CMD__DPIA;
6034 	cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION;
6035 	cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst;
6036 	cmd.set_tps_notification.tps_notification.tps = tps;
6037 
6038 	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
6039 }
6040 
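/*
 * Illustrative usage sketch, not part of the driver: notifying DMUB of the
 * training pattern requested on a DPIA link.  The helper name is an
 * assumption for this example only.
 */
static inline void example_notify_tps(const struct dc *dc, uint32_t link_index,
				      uint8_t requested_tps)
{
	/* fire-and-forget: the call waits for the inbox command to be consumed
	 * but returns no status */
	dc_process_dmub_dpia_set_tps_notification(dc, link_index, requested_tps);
}
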
6041 /**
6042  * dc_process_dmub_dpia_hpd_int_enable - Enables or disables DPIA HPD interrupts
6043  *
6044  * @dc: [in] dc structure
6045  * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
6046  *
6047  * Submits dpia hpd int enable command to dmub via inbox message
6048  */
6049 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
6050 				uint32_t hpd_int_enable)
6051 {
6052 	union dmub_rb_cmd cmd = {0};
6053 
6054 	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
6055 	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
6056 
6057 	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
6058 
6059 	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
6060 }
6061 
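/*
 * Illustrative usage sketch, not part of the driver: DPIA HPD interrupts are
 * typically disabled while the system suspends and re-enabled on resume.  The
 * helper names are assumptions for this example only.
 */
static inline void example_dpia_hpd_suspend(const struct dc *dc)
{
	dc_process_dmub_dpia_hpd_int_enable(dc, 0);
}

static inline void example_dpia_hpd_resume(const struct dc *dc)
{
	dc_process_dmub_dpia_hpd_int_enable(dc, 1);
}
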
6062 /**
6063  * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
6064  *
6065  * @dc: [in] dc structure
6066  *
6067  * Dumps the current DMUB diagnostic data through the DC logger.
6068  */
6069 void dc_print_dmub_diagnostic_data(const struct dc *dc)
6070 {
6071 	dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
6072 }
6073 
6074 /**
6075  * dc_disable_accelerated_mode - disable accelerated mode
6076  * @dc: dc structure
6077  */
6078 void dc_disable_accelerated_mode(struct dc *dc)
6079 {
6080 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
6081 }
6082 
6083 
6084 /**
6085  *  dc_notify_vsync_int_state - notifies vsync enable/disable state
6086  *  @dc: dc structure
6087  *  @stream: stream where vsync int state changed
6088  *  @enable: whether vsync is enabled or disabled
6089  *
6090  *  Called when vsync is enabled/disabled. Notifies DMUB to start/stop ABM
6091  *  interrupts after steady state is reached.
6092  */
6093 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
6094 {
6095 	int i;
6096 	int edp_num;
6097 	struct pipe_ctx *pipe = NULL;
6098 	struct dc_link *link = stream->sink->link;
6099 	struct dc_link *edp_links[MAX_NUM_EDP];
6100 
6101 
6102 	if (link->psr_settings.psr_feature_enabled)
6103 		return;
6104 
6105 	if (link->replay_settings.replay_feature_enabled)
6106 		return;
6107 
6108 	/*find primary pipe associated with stream*/
6109 	for (i = 0; i < MAX_PIPES; i++) {
6110 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
6111 
6112 		if (pipe->stream == stream && pipe->stream_res.tg)
6113 			break;
6114 	}
6115 
6116 	if (i == MAX_PIPES) {
6117 		ASSERT(0);
6118 		return;
6119 	}
6120 
6121 	dc_get_edp_links(dc, edp_links, &edp_num);
6122 
6123 	/* Determine panel inst */
6124 	for (i = 0; i < edp_num; i++) {
6125 		if (edp_links[i] == link)
6126 			break;
6127 	}
6128 
6129 	if (i == edp_num) {
6130 		return;
6131 	}
6132 
6133 	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
6134 		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
6135 }
6136 
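/*
 * Illustrative usage sketch, not part of the driver: a DM would typically call
 * dc_notify_vsync_int_state() from its vblank enable/disable hooks so that ABM
 * interrupt handling in DMUB follows the vblank interrupt state.  The helper
 * name is an assumption for this example only.
 */
static inline void example_on_vblank_toggle(struct dc *dc,
					    struct dc_stream_state *stream,
					    bool vblank_irq_enabled)
{
	/* the call is a no-op when PSR or Panel Replay owns the link, or when
	 * no pipe / eDP panel is currently driving the stream */
	dc_notify_vsync_int_state(dc, stream, vblank_irq_enabled);
}
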
6137 /*****************************************************************************
6138  *  dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
6139  *                          ABM
6140  *  @dc: dc structure
6141  *  @stream: stream whose ABM state is being saved or restored
6142  *  @pData: caller-provided buffer holding the ABM hardware state
6143  *
6144  ****************************************************************************/
6145 bool dc_abm_save_restore(
6146 		struct dc *dc,
6147 		struct dc_stream_state *stream,
6148 		struct abm_save_restore *pData)
6149 {
6150 	int i;
6151 	int edp_num;
6152 	struct pipe_ctx *pipe = NULL;
6153 	struct dc_link *link = stream->sink->link;
6154 	struct dc_link *edp_links[MAX_NUM_EDP];
6155 
6156 	if (link->replay_settings.replay_feature_enabled)
6157 		return false;
6158 
6159 	/*find primary pipe associated with stream*/
6160 	for (i = 0; i < MAX_PIPES; i++) {
6161 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
6162 
6163 		if (pipe->stream == stream && pipe->stream_res.tg)
6164 			break;
6165 	}
6166 
6167 	if (i == MAX_PIPES) {
6168 		ASSERT(0);
6169 		return false;
6170 	}
6171 
6172 	dc_get_edp_links(dc, edp_links, &edp_num);
6173 
6174 	/* Determine panel inst */
6175 	for (i = 0; i < edp_num; i++)
6176 		if (edp_links[i] == link)
6177 			break;
6178 
6179 	if (i == edp_num)
6180 		return false;
6181 
6182 	if (pipe->stream_res.abm &&
6183 		pipe->stream_res.abm->funcs->save_restore)
6184 		return pipe->stream_res.abm->funcs->save_restore(
6185 				pipe->stream_res.abm,
6186 				i,
6187 				pData);
6188 	return false;
6189 }
6190 
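/*
 * Illustrative usage sketch, not part of the driver: the caller owns the
 * abm_save_restore buffer, and (as an assumption here, since the struct is
 * defined elsewhere) whether a call saves or restores is encoded in that
 * payload.  The helper name is an assumption for this example only.
 */
static inline bool example_abm_save_restore(struct dc *dc,
					    struct dc_stream_state *stream,
					    struct abm_save_restore *state)
{
	/* returns false if Panel Replay is active or if no matching pipe or
	 * eDP panel is found for the stream */
	return dc_abm_save_restore(dc, stream, state);
}
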
6191 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
6192 {
6193 	unsigned int i;
6194 	bool subvp_sw_cursor_req = false;
6195 
6196 	for (i = 0; i < dc->current_state->stream_count; i++) {
6197 		if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
6198 			subvp_sw_cursor_req = true;
6199 			break;
6200 		}
6201 	}
6202 	properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
6203 }
6204 
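/*
 * Illustrative usage sketch, not part of the driver: validating a requested
 * HW cursor size against the current limit, which drops to 64 when any stream
 * needs the SubVP software-cursor fallback.  The helper name is an assumption
 * for this example only.
 */
static inline bool example_cursor_size_ok(struct dc *dc, unsigned int width,
					  unsigned int height)
{
	struct dc_current_properties props = {0};

	dc_query_current_properties(dc, &props);

	return width <= props.cursor_size_limit &&
	       height <= props.cursor_size_limit;
}
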
6205 /**
6206  * dc_set_edp_power() - DM controls eDP power to be ON/OFF
6207  *
6208  * Called when DM wants to power on/off eDP.
6209  *     Only works on links that have the skip_implict_edp_power_control flag set.
6210  *
6211  * @dc: Current DC state
6212  * @edp_link: a link with eDP connector signal type
6213  * @powerOn: power on/off eDP
6214  *
6215  * Return: void
6216  */
6217 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
6218 				 bool powerOn)
6219 {
6220 	if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
6221 		return;
6222 
6223 	if (edp_link->skip_implict_edp_power_control == false)
6224 		return;
6225 
6226 	edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
6227 }
6228 
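/*
 * Illustrative usage sketch, not part of the driver: explicitly powering an
 * eDP panel down.  The helper name is an assumption for this example only.
 */
static inline void example_force_edp_off(const struct dc *dc,
					 struct dc_link *link)
{
	/* silently ignored unless the link is an eDP connector and has
	 * skip_implict_edp_power_control set */
	dc_set_edp_power(dc, link, false);
}
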
6229 /*
6230  *****************************************************************************
6231  * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
6232  * @context: dc state from which to extract the power profile
6233  * Called when DM wants to make power policy decisions based on dc_state
6234  *
6235  *****************************************************************************
6236  */
6237 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
6238 {
6239 	struct dc_power_profile profile = { 0 };
6240 
6241 	profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support;
6242 	if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc)
6243 		return profile;
6244 	struct dc *dc = context->clk_mgr->ctx->dc;
6245 
6246 	if (dc->res_pool->funcs->get_power_profile)
6247 		profile.power_level = dc->res_pool->funcs->get_power_profile(context);
6248 	return profile;
6249 }
6250 
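/*
 * Illustrative usage sketch, not part of the driver: reading the power level
 * of a validated state.  Interpreting the value (for instance, a non-zero
 * level when P-state switching is unsupported) is left to the DM and is an
 * assumption of this example; the helper name is an assumption as well.
 */
static inline int example_state_power_level(const struct dc_state *context)
{
	struct dc_power_profile profile =
			dc_get_power_profile_for_dc_state(context);

	return profile.power_level;
}
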
6251 /*
6252  **********************************************************************************
6253  * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
6254  * @context: dc state from which to read the detile buffer size
6255  * Called when DM wants to log detile buffer size from dc_state
6256  *
6257  **********************************************************************************
6258  */
6259 unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
6260 {
6261 	struct dc *dc = context->clk_mgr->ctx->dc;
6262 
6263 	if (dc->res_pool->funcs->get_det_buffer_size)
6264 		return dc->res_pool->funcs->get_det_buffer_size(context);
6265 	else
6266 		return 0;
6267 }
6268
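/*
 * Illustrative usage sketch, not part of the driver: logging the detile (DET)
 * buffer size of a state; resource pools without a get_det_buffer_size hook
 * report 0.  The helper name is an assumption for this example only.
 */
static inline void example_log_det_buffer_size(const struct dc_state *context)
{
	struct dc *dc = context->clk_mgr->ctx->dc;

	DC_LOG_DEBUG("%s: det buffer size %u\n", __func__,
		     dc_get_det_buffer_size_from_state(context));
}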