/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
#include "dml2_policy.h"
#include "dml2_translation_helper.h"
#include "dml2_mall_phantom.h"
#include "dml2_dc_resource_mgmt.h"
#include "dml21_wrapper.h"

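/*
 * The three helpers below select between native SOC bounding-box
 * construction and translation from the legacy DC structures, based on
 * config.use_native_soc_bb_construction.
 */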
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_ip_params(dml2, in_dc, out);
	else
		dml2_translate_ip_params(in_dc, out);
}

static void initialize_dml2_soc_bbox(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_socbb_params(dml2, in_dc, out);
	else
		dml2_translate_socbb_params(in_dc, out);
}

static void initialize_dml2_soc_states(struct dml2_context *dml2,
	const struct dc *in_dc, const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_soc_states(dml2, in_dc, in_bbox, out);
	else
		dml2_translate_soc_states(in_dc, out, in_dc->dml.soc.num_states);
}

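/*
 * Copy the ODM/DPP/DSC decisions made by mode support back into the
 * display config, and record which stream and plane IDs each DML pipe
 * index serves. DLGRefClkFreqMHz is 24 MHz on the DCN3.5x/3.6 projects
 * and defaults to 50 MHz on dGPU projects.
 */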
static void map_hw_resources(struct dml2_context *dml2,
		struct dml_display_cfg_st *in_out_display_cfg, struct dml_mode_support_info_st *mode_support_info)
{
	unsigned int num_pipes = 0;
	int i, j;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		in_out_display_cfg->hw.ODMMode[i] = mode_support_info->ODMMode[i];
		in_out_display_cfg->hw.DPPPerSurface[i] = mode_support_info->DPPPerSurface[i];
		in_out_display_cfg->hw.DSCEnabled[i] = mode_support_info->DSCEnabled[i];
		in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
		in_out_display_cfg->hw.DLGRefClkFreqMHz = 24;
		if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
			dml2->v20.dml_core_ctx.project != dml_project_dcn36 &&
			dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
			/* dGPU default is 50 MHz */
			in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
		}
		for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
			if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
				dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
					  __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
				break;
			}
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[num_pipes] = true;
			num_pipes++;
		}
	}
}

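/*
 * Pack the scratch mode-support parameters and call dml_mode_support_ex.
 * For DC_VALIDATE_MODE_ONLY the search starts at the highest power state,
 * so only that state is evaluated; otherwise the search starts at state 0
 * so the lowest supported state can be found.
 */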
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
	const struct dml_display_cfg_st *display_cfg,
	struct dml_mode_support_info_st *evaluation_info,
	enum dc_validate_mode validate_mode)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
	s->mode_support_params.in_display_cfg = display_cfg;
	if (validate_mode == DC_VALIDATE_MODE_ONLY)
		s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
	else
		s->mode_support_params.in_start_state_idx = 0;
	s->mode_support_params.out_evaluation_info = evaluation_info;

	memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
	s->mode_support_params.out_lowest_state_idx = 0;

	return dml_mode_support_ex(&s->mode_support_params);
}

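/*
 * Attempt to lower dispclk by forcing ODM combine when a single timing
 * is active, the current policy is combine-as-needed, and the config
 * requests it. Returns true if the policy was changed (2:1 combine needs
 * one spare DPP, 4:1 needs three); the caller then re-runs mode support
 * with the new policy.
 */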
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
	int unused_dpps = p->ip_params->max_num_dpp;
	int i;
	int odms_needed;
	int largest_blend_and_timing = 0;
	bool optimization_done = false;

	for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
		if (p->cur_display_config->plane.BlendingAndTiming[i] > largest_blend_and_timing)
			largest_blend_and_timing = p->cur_display_config->plane.BlendingAndTiming[i];
	}

	if (p->new_policy != p->cur_policy)
		*p->new_policy = *p->cur_policy;

	if (p->new_display_config != p->cur_display_config)
		*p->new_display_config = *p->cur_display_config;

	// Optimize Clocks
	if (!optimization_done) {
		if (largest_blend_and_timing == 0 && p->cur_policy->ODMUse[0] == dml_odm_use_policy_combine_as_needed && dml2->config.minimize_dispclk_using_odm) {
			odms_needed = dml2_util_get_maximum_odm_combine_for_output(dml2->config.optimize_odm_4to1,
				p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;

			if (odms_needed <= unused_dpps) {
				if (odms_needed == 1) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
					optimization_done = true;
				} else if (odms_needed == 3) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_4to1;
					optimization_done = true;
				} else
					optimization_done = false;
			}
		}
	}

	return optimization_done;
}

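/*
 * Find the lowest power state that still supports vactive DRAM clock
 * change after substituting the dummy p-state latencies (presumably the
 * latencies seen during DRAM temperature reads, per the g6_temp_read
 * naming), and capture the corresponding watermark set. Returns -1 if no
 * state qualifies.
 */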
static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state,
		enum dc_validate_mode validate_mode)
{
	struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
	struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;

	unsigned int dml_result = 0;
	int result = -1, i, j;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out before each call */
	memset(s, 0, sizeof(struct dml2_calculate_lowest_supported_state_for_temp_read_scratch));
	memset(&s_global->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s_global->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations. This is
		 * also done in the DML1 driver code path, hence display_state cannot
		 * be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return -1;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		s->uclk_change_latencies[i] = dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us;
	}

	for (i = 0; i < 4; i++) {
		for (j = 0; j < dml2->v20.dml_core_ctx.states.num_states; j++) {
			dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
		}

		dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info,
						validate_mode);

		if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
			map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
			dml_result = dml_mode_programming(&dml2->v20.dml_core_ctx, s_global->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			ASSERT(dml_result);

			dml2_extract_watermark_set(&dml2->v20.g6_temp_read_watermark_set, &dml2->v20.dml_core_ctx);
			dml2->v20.g6_temp_read_watermark_set.cstate_pstate.fclk_pstate_change_ns = dml2->v20.g6_temp_read_watermark_set.cstate_pstate.pstate_change_ns;

			result = s_global->mode_support_params.out_lowest_state_idx;

			while (dml2->v20.dml_core_ctx.states.state_array[result].dram_speed_mts < s_global->dummy_pstate_table[i].dram_speed_mts)
				result++;

			break;
		}
	}

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us = s->uclk_change_latencies[i];
	}

	return result;
}

static void copy_dummy_pstate_table(struct dummy_pstate_entry *dest, struct dummy_pstate_entry *src, unsigned int num_entries)
{
	for (int i = 0; i < num_entries; i++) {
		dest[i] = src[i];
	}
}

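/*
 * Returns true if any timing that blends multiple planes also has ODM
 * active. This combination (windowed MPO + ODM) is only permitted when
 * enable_windowed_mpo_odm is set; see the policy check below.
 */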
static bool are_timings_requiring_odm_doing_blending(const struct dml_display_cfg_st *display_cfg,
		const struct dml_mode_support_info_st *evaluation_info)
{
	unsigned int planes_per_timing[__DML_NUM_PLANES__] = {0};
	int i;

	for (i = 0; i < display_cfg->num_surfaces; i++)
		planes_per_timing[display_cfg->plane.BlendingAndTiming[i]]++;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		if (planes_per_timing[i] > 1 && evaluation_info->ODMMode[i] != dml_odm_mode_bypass)
			return true;
	}

	return false;
}

static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const struct dml_display_cfg_st *display_cfg,
	const struct dml_mode_support_info_st *evaluation_info)
{
	bool pass = true;

	if (!ctx->config.enable_windowed_mpo_odm) {
		if (are_timings_requiring_odm_doing_blending(display_cfg, evaluation_info))
			pass = false;
	}

	return pass;
}

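/*
 * Build the DML display config from the DC state and run mode support,
 * then repeatedly apply optimize_configuration() until an optimization
 * attempt fails, keeping the last passing config and policy. If the
 * final attempt failed, mode support is re-run on the last passing
 * config so the DML core state matches the returned result.
 */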
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
		struct dc_state *display_state,
		enum dc_validate_mode validate_mode)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	unsigned int result = 0, i;
	unsigned int optimized_result = true;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out before each call */
	memset(&s->cur_display_config, 0, sizeof(struct dml_display_cfg_st));
	memset(&s->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
	memset(&s->optimize_configuration_params, 0, sizeof(struct dml2_wrapper_optimize_configuration_params));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations. This is
		 * also done in the DML1 driver code path, hence display_state cannot
		 * be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return false;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);
	if (!dml2->config.skip_hw_state_mapping)
		dml2_apply_det_buffer_allocation_policy(dml2, &s->cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&s->cur_display_config,
		&s->mode_support_info,
		validate_mode);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);

	// Try to optimize
	if (result) {
		s->cur_policy = dml2->v20.dml_core_ctx.policy;
		s->optimize_configuration_params.dml_core_ctx = &dml2->v20.dml_core_ctx;
		s->optimize_configuration_params.config = &dml2->config;
		s->optimize_configuration_params.ip_params = &dml2->v20.dml_core_ctx.ip;
		s->optimize_configuration_params.cur_display_config = &s->cur_display_config;
		s->optimize_configuration_params.cur_mode_support_info = &s->mode_support_info;
		s->optimize_configuration_params.cur_policy = &s->cur_policy;
		s->optimize_configuration_params.new_display_config = &s->new_display_config;
		s->optimize_configuration_params.new_policy = &s->new_policy;

		while (optimized_result && optimize_configuration(dml2, &s->optimize_configuration_params)) {
			dml2->v20.dml_core_ctx.policy = s->new_policy;
			optimized_result = pack_and_call_dml_mode_support_ex(dml2,
				&s->new_display_config,
				&s->mode_support_info,
				validate_mode);

			if (optimized_result)
				optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);

			// If the new optimized state is supported, then set current = new
			if (optimized_result) {
				s->cur_display_config = s->new_display_config;
				s->cur_policy = s->new_policy;
			} else {
				// Else, restore policy to current
				dml2->v20.dml_core_ctx.policy = s->cur_policy;
			}
		}

		// Optimization ended with a failing config, so restore the DML state to the last passing one
		if (!optimized_result) {
			result = pack_and_call_dml_mode_support_ex(dml2,
				&s->cur_display_config,
				&s->mode_support_info,
				validate_mode);
		}
	}

	if (result)
		map_hw_resources(dml2, &s->cur_display_config, &s->mode_support_info);

	return result;
}

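/*
 * Run mode support followed by mode programming. On dGPU, the
 * programming state index is raised to the minimum state required for
 * temperature reads when that value is valid; on APU, the lowest
 * supported state reported by mode support is used directly.
 */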
static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode)
{
	unsigned int result = 0;
	unsigned int min_state = 0;
	int min_state_for_g6_temp_read = 0;

	if (!context)
		return false;

	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
		min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context,
										validate_mode);

		ASSERT(min_state_for_g6_temp_read >= 0);
	}

	result = dml_mode_support_wrapper(dml2, context, validate_mode);

	/* When trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1.
	 * This leads to an invalid value of min_state, causing crashes later on. Use the default logic
	 * for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the
	 * value calculated by the DML directly.
	 */
	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
		if (min_state_for_g6_temp_read >= 0)
			min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
		else
			min_state = s->mode_support_params.out_lowest_state_idx;
	}

	if (result) {
		if (!context->streams[0]->sink->link->dc->caps.is_apu) {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
		} else {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
		}
	}
	return result;
}

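/*
 * Full validation path: run mode support and programming, map the DML
 * output onto DC pipes, verify the DET buffer configuration (re-running
 * the DML once if it changed), then publish clocks and watermarks into
 * the DC state. A state with zero streams short-circuits to the values
 * of the lowest power state.
 */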
static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context,
		enum dc_validate_mode validate_mode)
{
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	struct dml2_dcn_clocks out_clks;
	unsigned int result = 0;
	bool need_recalculation = false;
	uint32_t cstate_enter_plus_exit_z8_ns;

	if (context->stream_count == 0) {
		unsigned int lowest_state_idx = 0;

		out_clks.p_state_supported = true;
		out_clks.dispclk_khz = 0; /* No requirement, and lowest index will generally be maximum dispclk. */
		out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
		out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
		out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
		out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
		out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
		out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
		context->bw_ctx.bw.dcn.clk.dtbclk_en = false;
		dml2_copy_clocks_to_dc_state(&out_clks, context);
		return true;
	}

	/* Zero out before each call */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	/* Initialize DET scratch */
	dml2_initialize_det_scratch(dml2);

	copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);

	result = call_dml_mode_support_and_programming(context, validate_mode);
	/* Map DC pipes based on the DML output. To correctly determine whether recalculation
	 * is required, the resource context needs to reflect the number of active pipes, and
	 * we only know the correct number of active pipes after dml2_map_dc_pipes is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping)
		dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);

	/* Verify and update the DET buffer configuration if needed. dml2_verify_det_buffer_configuration
	 * checks whether the DET buffer size needs to be updated; if so, it updates the DETOverride
	 * variable and sets the need_recalculation flag, which triggers another mode support pass.
	 * Verification must run after dml_mode_programming because the getters return correct DET
	 * buffer values only after dml_mode_programming is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping) {
		need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
		if (need_recalculation) {
			/* Engage the DML again if recalculation is required. */
			call_dml_mode_support_and_programming(context, validate_mode);
			if (!dml2->config.skip_hw_state_mapping) {
				dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
			}
			need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
			ASSERT(need_recalculation == false);
		}
	}

	if (result) {
		unsigned int lowest_state_idx = s->mode_support_params.out_lowest_state_idx;
		out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.mp.Dispclk_calculated * 1000;
		out_clks.p_state_supported = s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported;
		if (in_dc->config.use_default_clock_table &&
			(lowest_state_idx < dml2->v20.dml_core_ctx.states.num_states - 1)) {
			lowest_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
			out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
		}

		out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
		out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
		out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
		out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
		out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
		out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
		context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(in_dc, context);

		if (!dml2->config.skip_hw_state_mapping) {
			/* Call dml2_calculate_rq_and_dlg_params */
			dml2_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml2, in_dc->res_pool->pipe_count);
		}

		dml2_copy_clocks_to_dc_state(&out_clks, context);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
		if (context->streams[0]->sink->link->dc->caps.is_apu)
			dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx);
		else
			memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
		dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
		// Copy StutterPeriod for deciding zstate use
		context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;

		cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns;

		if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time &&
				cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000)
			cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000;

		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
	}

	return result;
}

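/*
 * Lightweight validation: build the display config, run mode support and
 * the SW policy check, but skip programming and DC pipe mapping. Used
 * for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
 * paths.
 */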
static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode)
{
	struct dml2_context *dml2;
	unsigned int result = 0;

	if (!context || context->stream_count == 0)
		return true;

	dml2 = context->bw_ctx.dml2;

	/* Zero out before each call */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
	if (!dml2->config.skip_hw_state_mapping)
		dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&dml2->v20.scratch.cur_display_config,
		&dml2->v20.scratch.mode_support_info,
		validate_mode);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);

	return result == 1;
}

static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
{
	if (dc->debug.override_odm_optimization) {
		dml2->config.minimize_dispclk_using_odm = dc->debug.minimize_dispclk_using_odm;
	}
}

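/*
 * Public entry point from DC. DML2.1-architecture contexts are handled
 * by dml21_validate(); otherwise this chooses between the validate-only
 * and validate-and-build paths, holding the FP context around the
 * DML2.0 calls.
 */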
bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
	enum dc_validate_mode validate_mode)
{
	bool out = false;

	if (!dml2)
		return false;
	dml2_apply_debug_options(in_dc, dml2);

	/* DML2.1 validation path */
	if (dml2->architecture == dml2_architecture_21) {
		out = dml21_validate(in_dc, context, dml2, validate_mode);
		return out;
	}

	DC_FP_START();

	/* Use dml2_validate_only for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
	if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
		out = dml2_validate_only(context, validate_mode);
	else
		out = dml2_validate_and_build_resource(in_dc, context, validate_mode);

	DC_FP_END();

	return out;
}

static inline struct dml2_context *dml2_allocate_memory(void)
{
	return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
}

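/*
 * Select the DML project for the current DCN version and build the IP
 * parameters, SOC bounding box, and SOC states. Parts at DCN4.01+ with
 * debug.using_dml21 set are redirected to dml21_reinit() instead.
 */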
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
		dml21_reinit(in_dc, *dml2, config);
		return;
	}

	// Store config options
	(*dml2)->config = *config;

	switch (in_dc->ctx->dce_version) {
	case DCN_VERSION_3_5:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn35;
		break;
	case DCN_VERSION_3_51:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn351;
		break;
	case DCN_VERSION_3_6:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn36;
		break;
	case DCN_VERSION_3_2:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
		break;
	case DCN_VERSION_3_21:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn321;
		break;
	case DCN_VERSION_4_01:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn401;
		break;
	default:
		(*dml2)->v20.dml_core_ctx.project = dml_project_default;
		break;
	}

	DC_FP_START();

	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);

	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);

	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);

	DC_FP_END();
}

bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	// TODO: Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after the N-1 validation phase is complete.
	if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
		return dml21_create(in_dc, dml2, config);

	// Allocate Mode Lib Ctx
	*dml2 = dml2_allocate_memory();

	if (!(*dml2))
		return false;

	dml2_init(in_dc, config, dml2);

	return true;
}

void dml2_destroy(struct dml2_context *dml2)
{
	if (!dml2)
		return;

	if (dml2->architecture == dml2_architecture_21)
		dml21_destroy(dml2);
	vfree(dml2);
}

void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
	unsigned int *fclk_change_support, unsigned int *dram_clk_change_support)
{
	*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
	*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}

void dml2_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2)
{
	if (dml2->architecture == dml2_architecture_21)
		dml21_prepare_mcache_programming(in_dc, context, dml2);
}

void dml2_copy(struct dml2_context *dst_dml2,
	struct dml2_context *src_dml2)
{
	if (src_dml2->architecture == dml2_architecture_21) {
		dml21_copy(dst_dml2, src_dml2);
		return;
	}
	/* copy Mode Lib Ctx */
	memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
}

bool dml2_create_copy(struct dml2_context **dst_dml2,
	struct dml2_context *src_dml2)
{
	if (src_dml2->architecture == dml2_architecture_21)
		return dml21_create_copy(dst_dml2, src_dml2);
	/* Allocate Mode Lib Ctx */
	*dst_dml2 = dml2_allocate_memory();

	if (!(*dst_dml2))
		return false;

	/* copy Mode Lib Ctx */
	dml2_copy(*dst_dml2, src_dml2);

	return true;
}

void dml2_reinit(const struct dc *in_dc,
				 const struct dml2_configuration_options *config,
				 struct dml2_context **dml2)
{
	if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
		dml21_reinit(in_dc, *dml2, config);
		return;
	}

	dml2_init(in_dc, config, dml2);
}
705