1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4
5 #include "dml2_pmo_factory.h"
6 #include "dml2_debug.h"
7 #include "lib_float_math.h"
8 #include "dml2_pmo_dcn4_fams2.h"
9
10 static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
11 static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
12
13 static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
14 // VActive Preferred
15 {
16 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
17 .allow_state_increase = true,
18 },
19
20 // Then SVP
21 {
22 .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
23 .allow_state_increase = true,
24 },
25
26 // Then VBlank
27 {
28 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
29 .allow_state_increase = false,
30 },
31
32 // Then DRR
33 {
34 .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
35 .allow_state_increase = true,
36 },
37
38 // Finally VBlank, but allow base clocks for latency to increase
39 /*
40 {
41 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
42 .allow_state_increase = true,
43 },
44 */
45 };
46
47 static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy);
48
49 static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
50 // VActive only is preferred
51 {
52 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na },
53 .allow_state_increase = true,
54 },
55
56 // Then VActive + VBlank
57 {
58 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
59 .allow_state_increase = false,
60 },
61
62 // Then VBlank only
63 {
64 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
65 .allow_state_increase = false,
66 },
67
68 // Then SVP + VBlank
69 {
70 .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
71 .allow_state_increase = false,
72 },
73
74 // Then SVP + DRR
75 {
76 .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
77 .allow_state_increase = true,
78 },
79
80 // Then SVP + SVP
81 {
82 .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na },
83 .allow_state_increase = true,
84 },
85
86 // Then DRR + VActive
87 {
88 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
89 .allow_state_increase = true,
90 },
91
92 // Then DRR + DRR
93 {
94 .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
95 .allow_state_increase = true,
96 },
97
98 // Finally VBlank, but allow base clocks for latency to increase
99 /*
100 {
101 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
102 .allow_state_increase = true,
103 },
104 */
105 };
106
107 static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy);
108
109 static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
110 // All VActive
111 {
112 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na },
113 .allow_state_increase = true,
114 },
115
116 // VActive + 1 VBlank
117 {
118 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
119 .allow_state_increase = false,
120 },
121
122 // All VBlank
123 {
124 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
125 .allow_state_increase = false,
126 },
127
128 // All DRR
129 {
130 .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na },
131 .allow_state_increase = true,
132 },
133
134 // All VBlank, with state increase allowed
135 /*
136 {
137 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
138 .allow_state_increase = true,
139 },
140 */
141 };
142
143 static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy);
144
145 static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
146 // All VActive
147 {
148 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive },
149 .allow_state_increase = true,
150 },
151
152 // VActive + 1 VBlank
153 {
154 .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
155 .allow_state_increase = false,
156 },
157
158 // All Vblank
159 {
160 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
161 .allow_state_increase = false,
162 },
163
164 // All DRR
165 {
166 .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr },
167 .allow_state_increase = true,
168 },
169
170 // All VBlank, with state increase allowed
171 /*
172 {
173 .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
174 .allow_state_increase = true,
175 },
176 */
177 };
178
179 static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy);
180
181
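/*
 * Bump a stream's ODM combine factor to the next level (bypass -> 2:1 ->
 * 3:1 -> 4:1). An "auto" mode is first resolved to the mode implied by the
 * calculated ODM segment count. Returns false if no higher factor exists.
 */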
static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
183 {
184 bool result = true;
185
186 if (*odm_mode == dml2_odm_mode_auto) {
187 switch (odms_calculated) {
188 case 1:
189 *odm_mode = dml2_odm_mode_bypass;
190 break;
191 case 2:
192 *odm_mode = dml2_odm_mode_combine_2to1;
193 break;
194 case 3:
195 *odm_mode = dml2_odm_mode_combine_3to1;
196 break;
197 case 4:
198 *odm_mode = dml2_odm_mode_combine_4to1;
199 break;
200 default:
201 result = false;
202 break;
203 }
204 }
205
206 if (result) {
207 if (*odm_mode == dml2_odm_mode_bypass) {
208 *odm_mode = dml2_odm_mode_combine_2to1;
209 } else if (*odm_mode == dml2_odm_mode_combine_2to1) {
210 *odm_mode = dml2_odm_mode_combine_3to1;
211 } else if (*odm_mode == dml2_odm_mode_combine_3to1) {
212 *odm_mode = dml2_odm_mode_combine_4to1;
213 } else {
214 result = false;
215 }
216 }
217
218 return result;
219 }
220
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
222 {
223 if (*mpc_combine_factor < limit) {
224 (*mpc_combine_factor)++;
225 return true;
226 }
227
228 return false;
229 }
230
static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index)
232 {
233 unsigned int i, count;
234
235 count = 0;
236 for (i = 0; i < display_cfg->num_planes; i++) {
237 if (display_cfg->plane_descriptors[i].stream_index == stream_index)
238 count++;
239 }
240
241 return count;
242 }
243
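/*
 * For each plane failing the DCC mcache check, try to add one more DPP via
 * MPC combine, consuming from the pool of free pipes. Planes whose stream
 * already uses ODM combine cannot be optimized here, so that case fails.
 */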
static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out,
	int free_pipes)
246 {
247 struct dml2_pmo_instance *pmo = in_out->instance;
248
249 unsigned int i;
250 bool result = true;
251
252 for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
253 // For pipes that failed dcc mcache check, we want to increase the pipe count.
		// The logic for doing this depends on how many pipes are already being used,
255 // and whether it's mpcc or odm combine.
256 if (!in_out->dcc_mcache_supported[i]) {
257 // For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1
258 if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) {
259 in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor =
260 in_out->cfg_support_info->plane_support_info[i].dpps_used;
261 // For each plane that is not passing mcache validation, just add another pipe to it, up to the limit.
262 if (free_pipes > 0) {
263 if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor,
264 pmo->mpc_combine_limit)) {
265 // We've reached max pipes allocatable to a single plane, so we fail.
266 result = false;
267 break;
268 } else {
269 // Successfully added another pipe to this failing plane.
270 free_pipes--;
271 }
272 } else {
273 // No free pipes to add.
274 result = false;
275 break;
276 }
277 } else {
278 // If the stream of this plane needs ODM combine, no further optimization can be done.
279 result = false;
280 break;
281 }
282 }
283 }
284
285 return result;
286 }
287
bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out)
289 {
290 struct dml2_pmo_instance *pmo = in_out->instance;
291
292 unsigned int i, used_pipes, free_pipes, planes_on_stream;
293 bool result;
294
295 if (in_out->display_config != in_out->optimized_display_cfg) {
296 memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg));
297 }
298
	// Count number of free pipes, and check if any odm combine is in use.
300 used_pipes = 0;
301 for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
302 used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used;
303 }
304 free_pipes = pmo->ip_caps->pipe_count - used_pipes;
305
306 // Optimization loop
307 // The goal here is to add more pipes to any planes
308 // which are failing mcache admissibility
309 result = true;
310
311 // The optimization logic depends on whether ODM combine is enabled, and the stream count.
312 if (in_out->optimized_display_cfg->num_streams > 1 || in_out->instance->options->disable_dyn_odm) {
313 // If there are multiple streams, we are limited to only be able to optimize mcache failures on planes
314 // which are not ODM combined.
315
316 result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
317 } else if (in_out->optimized_display_cfg->num_streams == 1) {
318 // In single stream cases, we still optimize mcache failures when there's ODM combine with some
319 // additional logic.
320
321 if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) {
322 // If ODM combine is enabled, then the logic is to increase ODM combine factor.
323
324 // Optimization for streams with > 1 ODM combine factor is only supported for single display.
325 planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0);
326
327 for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
328 // For pipes that failed dcc mcache check, we want to increase the pipe count.
				// The logic for doing this depends on how many pipes are already being used,
330 // and whether it's mpcc or odm combine.
331 if (!in_out->dcc_mcache_supported[i]) {
332 // Increasing ODM combine factor on a stream requires a free pipe for each plane on the stream.
333 if (free_pipes >= planes_on_stream) {
334 if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode,
335 in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
336 result = false;
337 } else {
338 break;
339 }
340 } else {
341 result = false;
342 break;
343 }
344 }
345 }
346 } else {
347 // If ODM combine is not enabled, then we can actually use the same logic as before.
348
349 result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
350 }
351 } else {
352 result = true;
353 }
354
355 return result;
356 }
357
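/* Map a base p-state method to its FW DRR variant, if one exists. */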
static enum dml2_pstate_method convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)
359 {
360 enum dml2_pstate_method variant_strategy = 0;
361
362 switch (base_strategy) {
363 case dml2_pstate_method_vactive:
364 variant_strategy = dml2_pstate_method_fw_vactive_drr;
365 break;
366 case dml2_pstate_method_vblank:
367 variant_strategy = dml2_pstate_method_fw_vblank_drr;
368 break;
369 case dml2_pstate_method_fw_svp:
370 variant_strategy = dml2_pstate_method_fw_svp_drr;
371 break;
372 case dml2_pstate_method_fw_vactive_drr:
373 case dml2_pstate_method_fw_vblank_drr:
374 case dml2_pstate_method_fw_svp_drr:
375 case dml2_pstate_method_fw_drr:
376 case dml2_pstate_method_reserved_hw:
377 case dml2_pstate_method_reserved_fw:
378 case dml2_pstate_method_reserved_fw_drr_clamped:
379 case dml2_pstate_method_reserved_fw_drr_var:
380 case dml2_pstate_method_count:
381 case dml2_pstate_method_na:
382 default:
383 /* no variant for this mode */
384 variant_strategy = base_strategy;
385 }
386
387 return variant_strategy;
388 }
389
static struct dml2_pmo_pstate_strategy *get_expanded_strategy_list(struct dml2_pmo_init_data *init_data, int stream_count)
391 {
392 struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
393
394 switch (stream_count) {
395 case 1:
396 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_1_display;
397 break;
398 case 2:
399 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_2_display;
400 break;
401 case 3:
402 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_3_display;
403 break;
404 case 4:
405 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_4_display;
406 break;
407 default:
408 break;
409 }
410
411 return expanded_strategy_list;
412 }
413
static unsigned int get_num_expanded_strategies(
	struct dml2_pmo_init_data *init_data,
	int stream_count)
417 {
418 return init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1];
419 }
420
static void insert_strategy_into_expanded_list(
	const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
	const int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
426 {
427 if (expanded_strategy_list && num_expanded_strategies) {
428 memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
429
430 (*num_expanded_strategies)++;
431 }
432 }
433
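/*
 * Expand a base strategy into every distinct per-stream ordering of its
 * p-state methods. Permutations are built iteratively (no recursion) and
 * each complete assignment is appended to the expanded strategy list.
 */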
static void expand_base_strategy(
	const struct dml2_pmo_pstate_strategy *base_strategy,
	const unsigned int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
439 {
440 bool skip_to_next_stream;
441 bool expanded_strategy_added;
442 bool skip_iteration;
443 unsigned int i, j;
444 unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
445 unsigned int stream_iteration_indices[PMO_DCN4_MAX_DISPLAYS] = { 0 };
446 struct dml2_pmo_pstate_strategy cur_strategy_list = { 0 };
447
448 /* determine number of displays per method */
449 for (i = 0; i < stream_count; i++) {
450 /* increment the count of the earliest index with the same method */
451 for (j = 0; j < stream_count; j++) {
452 if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
453 num_streams_per_method[j] = num_streams_per_method[j] + 1;
454 break;
455 }
456 }
457 }
458
459 cur_strategy_list.allow_state_increase = base_strategy->allow_state_increase;
460
461 i = 0;
462 /* uses a while loop instead of recursion to build permutations of base strategy */
463 while (stream_iteration_indices[0] < stream_count) {
464 skip_to_next_stream = false;
465 expanded_strategy_added = false;
466 skip_iteration = false;
467
468 /* determine what to do for this iteration */
469 if (stream_iteration_indices[i] < stream_count && num_streams_per_method[stream_iteration_indices[i]] != 0) {
470 /* decrement count and assign method */
471 cur_strategy_list.per_stream_pstate_method[i] = base_strategy->per_stream_pstate_method[stream_iteration_indices[i]];
472 num_streams_per_method[stream_iteration_indices[i]] -= 1;
473
474 if (i >= stream_count - 1) {
475 /* insert into strategy list */
476 insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, expanded_strategy_list, num_expanded_strategies);
477 expanded_strategy_added = true;
478 } else {
479 /* skip to next stream */
480 skip_to_next_stream = true;
481 }
482 } else {
483 skip_iteration = true;
484 }
485
486 /* prepare for next iteration */
487 if (skip_to_next_stream) {
488 i++;
489 } else {
490 /* restore count */
491 if (!skip_iteration) {
492 num_streams_per_method[stream_iteration_indices[i]] += 1;
493 }
494
495 /* increment iteration count */
496 stream_iteration_indices[i]++;
497
498 /* if iterations are complete, or last stream was reached */
499 if ((stream_iteration_indices[i] >= stream_count || expanded_strategy_added) && i > 0) {
500 /* reset per stream index, decrement i */
501 stream_iteration_indices[i] = 0;
502 i--;
503
504 /* restore previous stream's count and increment index */
505 num_streams_per_method[stream_iteration_indices[i]] += 1;
506 stream_iteration_indices[i]++;
507 }
508 }
509 }
510 }
511
512
static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
	const struct dml2_pmo_pstate_strategy *variant_strategy,
	const unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
	const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
	const unsigned int stream_count)
518 {
519 bool valid = true;
520 unsigned int i;
521
522 /* check all restrictions are met */
523 for (i = 0; i < stream_count; i++) {
524 /* vblank + vblank_drr variants are invalid */
525 if (base_strategy->per_stream_pstate_method[i] == dml2_pstate_method_vblank &&
526 ((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
527 num_streams_per_variant_method[i] > 1)) {
528 valid = false;
529 break;
530 }
531 }
532
533 return valid;
534 }
535
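/*
 * Generate DRR variants of a base strategy by substituting base methods with
 * their FW DRR counterparts in every combination. Each valid variant is
 * either permuted via expand_base_strategy() or inserted directly.
 */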
static void expand_variant_strategy(
	const struct dml2_pmo_pstate_strategy *base_strategy,
	const unsigned int stream_count,
	const bool should_permute,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
542 {
543 bool variant_found;
544 unsigned int i, j;
545 unsigned int method_index;
546 unsigned int stream_index;
547 unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
548 unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
549 unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
550 enum dml2_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
551 struct dml2_pmo_pstate_strategy variant_strategy = { 0 };
552
553 /* determine number of displays per method */
554 for (i = 0; i < stream_count; i++) {
555 /* increment the count of the earliest index with the same method */
556 for (j = 0; j < stream_count; j++) {
557 if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
558 num_streams_per_method[j] = num_streams_per_method[j] + 1;
559 break;
560 }
561 }
562
563 per_stream_variant_method[i] = convert_strategy_to_drr_variant(base_strategy->per_stream_pstate_method[i]);
564 }
565 memcpy(num_streams_per_base_method, num_streams_per_method, sizeof(unsigned int) * PMO_DCN4_MAX_DISPLAYS);
566
567 memcpy(&variant_strategy, base_strategy, sizeof(struct dml2_pmo_pstate_strategy));
568
569 method_index = 0;
570 /* uses a while loop instead of recursion to build permutations of base strategy */
571 while (num_streams_per_base_method[0] > 0 || method_index != 0) {
572 if (method_index == stream_count) {
573 /* construct variant strategy */
574 variant_found = false;
575 stream_index = 0;
576
577 for (i = 0; i < stream_count; i++) {
578 for (j = 0; j < num_streams_per_base_method[i]; j++) {
579 variant_strategy.per_stream_pstate_method[stream_index++] = base_strategy->per_stream_pstate_method[i];
580 }
581
582 for (j = 0; j < num_streams_per_variant_method[i]; j++) {
583 variant_strategy.per_stream_pstate_method[stream_index++] = per_stream_variant_method[i];
584 if (base_strategy->per_stream_pstate_method[i] != per_stream_variant_method[i]) {
585 variant_found = true;
586 }
587 }
588 }
589
590 if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
591 if (should_permute) {
592 /* permutations are permitted, proceed to expand */
593 expand_base_strategy(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
594 } else {
595 /* no permutations allowed, so add to list now */
596 insert_strategy_into_expanded_list(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
597 }
598 }
599
600 /* rollback to earliest method with bases remaining */
601 for (method_index = stream_count - 1; method_index > 0; method_index--) {
602 if (num_streams_per_base_method[method_index]) {
603 /* bases remaining */
604 break;
605 } else {
606 /* reset counters */
607 num_streams_per_base_method[method_index] = num_streams_per_method[method_index];
608 num_streams_per_variant_method[method_index] = 0;
609 }
610 }
611 }
612
613 if (num_streams_per_base_method[method_index]) {
614 num_streams_per_base_method[method_index]--;
615 num_streams_per_variant_method[method_index]++;
616
617 method_index++;
618 } else if (method_index != 0) {
619 method_index++;
620 }
621 }
622 }
623
void pmo_dcn4_fams2_expand_base_pstate_strategies(
	const struct dml2_pmo_pstate_strategy *base_strategies_list,
	const unsigned int num_base_strategies,
	const unsigned int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
630 {
631 unsigned int i;
632
633 /* expand every explicit base strategy (except all DRR) */
634 for (i = 0; i < num_base_strategies; i++) {
635 expand_base_strategy(&base_strategies_list[i], stream_count, expanded_strategy_list, num_expanded_strategies);
636 expand_variant_strategy(&base_strategies_list[i], stream_count, true, expanded_strategy_list, num_expanded_strategies);
637 }
638 }
639
bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
641 {
642 int i = 0;
643 struct dml2_pmo_instance *pmo = in_out->instance;
644
645 pmo->soc_bb = in_out->soc_bb;
646 pmo->ip_caps = in_out->ip_caps;
647 pmo->mpc_combine_limit = 2;
648 pmo->odm_combine_limit = 4;
649 pmo->mcg_clock_table_size = in_out->mcg_clock_table_size;
650
651 pmo->fams_params.v2.subvp.refresh_rate_limit_max = 175;
652 pmo->fams_params.v2.subvp.refresh_rate_limit_min = 0;
653 pmo->fams_params.v2.drr.refresh_rate_limit_max = 1000;
654 pmo->fams_params.v2.drr.refresh_rate_limit_min = 119;
655
656 pmo->options = in_out->options;
657
658 /* generate permutations of p-state configs from base strategy list */
659 for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
660 switch (i) {
661 case 1:
662 DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
663
664 /* populate list */
665 pmo_dcn4_fams2_expand_base_pstate_strategies(
666 base_strategy_list_1_display,
667 base_strategy_list_1_display_size,
668 i,
669 pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
670 &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
671 break;
672 case 2:
673 DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
674
675 /* populate list */
676 pmo_dcn4_fams2_expand_base_pstate_strategies(
677 base_strategy_list_2_display,
678 base_strategy_list_2_display_size,
679 i,
680 pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
681 &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
682 break;
683 case 3:
684 DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
685
686 /* populate list */
687 pmo_dcn4_fams2_expand_base_pstate_strategies(
688 base_strategy_list_3_display,
689 base_strategy_list_3_display_size,
690 i,
691 pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
692 &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
693 break;
694 case 4:
695 DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
696
697 /* populate list */
698 pmo_dcn4_fams2_expand_base_pstate_strategies(
699 base_strategy_list_4_display,
700 base_strategy_list_4_display_size,
701 i,
702 pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
703 &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
704 break;
705 }
706 }
707
708 return true;
709 }
710
static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator)
712 {
713 /*
714 * Htotal, Hblank start/end, and Hsync start/end all must be divisible
715 * in order for the horizontal timing params to be considered divisible
716 * by 2. Hsync start is always 0.
717 */
718 unsigned long h_blank_start = timing->h_total - timing->h_front_porch;
719
720 return (timing->h_total % denominator == 0) &&
721 (h_blank_start % denominator == 0) &&
722 (timing->h_blank_end % denominator == 0) &&
723 (timing->h_sync_width % denominator == 0);
724 }
725
static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type)
727 {
728 switch (encoder_type) {
729 case dml2_dp:
730 case dml2_edp:
731 case dml2_dp2p0:
732 case dml2_none:
733 return true;
734 case dml2_hdmi:
735 case dml2_hdmifrl:
736 default:
737 return false;
738 }
739 }
740
bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
742 {
743 unsigned int i;
744 const struct dml2_display_cfg *display_config =
745 &in_out->base_display_config->display_config;
746 const struct dml2_core_mode_support_result *mode_support_result =
747 &in_out->base_display_config->mode_support_result;
748 struct dml2_optimization_stage4_state *state =
749 &in_out->base_display_config->stage4;
750
751 if (in_out->instance->options->disable_dyn_odm ||
752 (in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
753 return false;
754
755 for (i = 0; i < display_config->num_planes; i++)
756 /*
757 * vmin optimization is required to be seamlessly switched off
758 * at any time when the new configuration is no longer
759 * supported. However switching from ODM combine to MPC combine
		 * is not always seamless. When there are not enough free pipes, we
761 * will have to use the same secondary OPP heads as secondary
762 * DPP pipes in MPC combine in new state. This transition is
763 * expected to cause glitches. To avoid the transition, we only
764 * allow vmin optimization if the stream's base configuration
765 * doesn't require MPC combine. This condition checks if MPC
766 * combine is enabled. If so do not optimize the stream.
767 */
768 if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
769 mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
770 state->unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
771
772 for (i = 0; i < display_config->num_streams; i++) {
773 if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
774 state->unoptimizable_streams[i] = true;
775 else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
776 in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
777 state->unoptimizable_streams[i] = true;
778 /*
779 * ODM Combine requires horizontal timing divisible by 2 so each
780 * ODM segment has the same size.
781 */
782 else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
783 state->unoptimizable_streams[i] = true;
784 /*
785 * Our hardware support seamless ODM transitions for DP encoders
786 * only.
787 */
788 else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
789 state->unoptimizable_streams[i] = true;
790 }
791
792 state->performed = true;
793
794 return true;
795 }
796
bool pmo_dcn4_fams2_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out)
798 {
799 bool is_vmin = true;
800
801 if (in_out->vmin_limits->dispclk_khz > 0 &&
802 in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz)
803 is_vmin = false;
804
805 return is_vmin;
806 }
807
static int find_highest_odm_load_stream_index(
	const struct dml2_display_cfg *display_config,
	const struct dml2_core_mode_support_result *mode_support_result)
811 {
812 unsigned int i;
813 int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
814
815 for (i = 0; i < display_config->num_streams; i++) {
816 if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
817 odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
818 / mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
819 else
820 odm_load = 0;
821
822 if (odm_load > highest_odm_load) {
823 highest_odm_load_index = i;
824 highest_odm_load = odm_load;
825 }
826 }
827
828 return highest_odm_load_index;
829 }
830
bool pmo_dcn4_fams2_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out)
832 {
833 int stream_index;
834 const struct dml2_display_cfg *display_config =
835 &in_out->base_display_config->display_config;
836 const struct dml2_core_mode_support_result *mode_support_result =
837 &in_out->base_display_config->mode_support_result;
838 unsigned int odms_used;
839 struct dml2_stream_parameters *stream_descriptor;
840 bool optimizable = false;
841
842 /*
843 * highest odm load stream must be optimizable to continue as dispclk is
844 * bounded by it.
845 */
846 stream_index = find_highest_odm_load_stream_index(display_config,
847 mode_support_result);
848
849 if (stream_index < 0 ||
850 in_out->base_display_config->stage4.unoptimizable_streams[stream_index])
851 return false;
852
853 odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used;
854 if ((int)odms_used >= in_out->instance->odm_combine_limit)
855 return false;
856
857 memcpy(in_out->optimized_display_config,
858 in_out->base_display_config,
859 sizeof(struct display_configuation_with_meta));
860
861 stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index];
862 while (!optimizable && increase_odm_combine_factor(
863 &stream_descriptor->overrides.odm_mode,
864 odms_used)) {
865 switch (stream_descriptor->overrides.odm_mode) {
866 case dml2_odm_mode_combine_2to1:
867 optimizable = true;
868 break;
869 case dml2_odm_mode_combine_3to1:
870 /*
871 * In ODM Combine 3:1 OTG_valid_pixel rate is 1/4 of
872 * actual pixel rate. Therefore horizontal timing must
873 * be divisible by 4.
874 */
875 if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
876 if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
877 /*
878 * DSC h slice count must be divisible
879 * by 3.
880 */
881 if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0)
882 optimizable = true;
883 } else {
884 optimizable = true;
885 }
886 }
887 break;
888 case dml2_odm_mode_combine_4to1:
889 /*
890 * In ODM Combine 4:1 OTG_valid_pixel rate is 1/4 of
891 * actual pixel rate. Therefore horizontal timing must
892 * be divisible by 4.
893 */
894 if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
895 if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
896 /*
897 * DSC h slice count must be divisible
898 * by 4.
899 */
900 if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0)
901 optimizable = true;
902 } else {
903 optimizable = true;
904 }
905 }
906 break;
907 case dml2_odm_mode_auto:
908 case dml2_odm_mode_bypass:
909 case dml2_odm_mode_split_1to2:
910 case dml2_odm_mode_mso_1to2:
911 case dml2_odm_mode_mso_1to4:
912 default:
913 break;
914 }
915 }
916
917 return optimizable;
918 }
919
static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
921 {
922 *bit_field = *bit_field | (0x1 << bit_offset);
923 }
924
static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
926 {
927 if (bit_field & (0x1 << bit_offset))
928 return true;
929
930 return false;
931 }
932
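/*
 * Partition streams into synchronized timing groups. Streams whose timings
 * match exactly share a group; a DRR-enabled stream always gets its own
 * group, since its timing cannot be synchronized with others.
 */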
static void build_synchronized_timing_groups(
	struct dml2_pmo_instance *pmo,
	struct display_configuation_with_meta *display_config)
936 {
937 unsigned int i, j;
938 struct dml2_timing_cfg *master_timing;
939
940 unsigned int stream_mapped_mask = 0;
941 unsigned int num_timing_groups = 0;
942 unsigned int timing_group_idx = 0;
943 struct dml2_pmo_scratch *s = &pmo->scratch;
944
945 /* clear all group masks */
946 memset(s->pmo_dcn4.synchronized_timing_group_masks, 0, sizeof(s->pmo_dcn4.synchronized_timing_group_masks));
947 memset(s->pmo_dcn4.group_is_drr_enabled, 0, sizeof(s->pmo_dcn4.group_is_drr_enabled));
948 memset(s->pmo_dcn4.group_is_drr_active, 0, sizeof(s->pmo_dcn4.group_is_drr_active));
949 memset(s->pmo_dcn4.group_line_time_us, 0, sizeof(s->pmo_dcn4.group_line_time_us));
950 s->pmo_dcn4.num_timing_groups = 0;
951
952 for (i = 0; i < display_config->display_config.num_streams; i++) {
953 master_timing = &display_config->display_config.stream_descriptors[i].timing;
954
		/* only need to build group if this stream is not in a group already */
956 if (is_bit_set_in_bitfield(stream_mapped_mask, i)) {
957 continue;
958 }
959 set_bit_in_bitfield(&stream_mapped_mask, i);
960 timing_group_idx = num_timing_groups;
961 num_timing_groups++;
962
963 /* trivially set default timing group to itself */
964 set_bit_in_bitfield(&s->pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i);
965 s->pmo_dcn4.group_line_time_us[timing_group_idx] = (double)master_timing->h_total / master_timing->pixel_clock_khz * 1000.0;
966
		/* if drr is in use, timing is not synchronizable */
968 if (master_timing->drr_config.enabled) {
969 s->pmo_dcn4.group_is_drr_enabled[timing_group_idx] = true;
970 s->pmo_dcn4.group_is_drr_active[timing_group_idx] = !master_timing->drr_config.disallowed &&
971 (master_timing->drr_config.drr_active_fixed || master_timing->drr_config.drr_active_variable);
972 continue;
973 }
974
975 /* find synchronizable timing groups */
976 for (j = i + 1; j < display_config->display_config.num_streams; j++) {
977 if (memcmp(master_timing,
978 &display_config->display_config.stream_descriptors[j].timing,
979 sizeof(struct dml2_timing_cfg)) == 0) {
980 set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
981 set_bit_in_bitfield(&stream_mapped_mask, j);
982 }
983 }
984 }
985
986 s->pmo_dcn4.num_timing_groups = num_timing_groups;
987 }
988
static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_config,
	unsigned int mask)
992 {
993 unsigned int i;
994 bool valid = true;
995
	// Create a remap array to enable simple iteration through only masked stream indices
997 for (i = 0; i < display_config->display_config.num_streams; i++) {
998 if (is_bit_set_in_bitfield(mask, i)) {
999 /* check if stream has enough vactive margin */
1000 valid &= is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.stream_vactive_capability_mask, i);
1001 }
1002 }
1003
1004 return valid;
1005 }
1006
static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_config,
	unsigned int mask)
1010 {
1011 unsigned int i;
1012
1013 bool synchronizable = true;
1014
1015 /* find first vblank stream index and compare the timing group mask */
1016 for (i = 0; i < display_config->display_config.num_streams; i++) {
1017 if (is_bit_set_in_bitfield(mask, i)) {
1018 if (mask != pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[i]) {
1019 /* vblank streams are not synchronizable */
1020 synchronizable = false;
1021 }
1022 break;
1023 }
1024 }
1025
1026 return synchronizable;
1027 }
1028
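/* Total SubVP microschedule length for a stream, in OTG vlines. */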
static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
1030 {
1031 return fams2_meta->contention_delay_otg_vlines +
1032 fams2_meta->method_subvp.programming_delay_otg_vlines +
1033 fams2_meta->method_subvp.phantom_vtotal +
1034 fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
1035 fams2_meta->dram_clk_change_blackout_otg_vlines;
1036 }
1037
static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_config,
	unsigned int mask)
1041 {
1042 unsigned int i;
1043 for (i = 0; i < DML2_MAX_PLANES; i++) {
1044 const struct dml2_stream_parameters *stream_descriptor;
1045 const struct dml2_fams2_meta *stream_fams2_meta;
1046
1047 if (is_bit_set_in_bitfield(mask, i)) {
1048 stream_descriptor = &display_config->display_config.stream_descriptors[i];
1049 stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
1050
1051 if (!stream_descriptor->timing.drr_config.enabled)
1052 return false;
1053
1054 /* cannot support required vtotal */
1055 if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
1056 return false;
1057 }
1058
1059 /* check rr is within bounds */
1060 if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
1061 stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
1062 return false;
1063 }
1064
1065 /* check required stretch is allowed */
1066 if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
1067 stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
1068 return false;
1069 }
1070 }
1071 }
1072
1073 return true;
1074 }
1075
static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_config,
	unsigned int mask)
1079 {
1080 const struct dml2_stream_parameters *stream_descriptor;
1081 const struct dml2_plane_parameters *plane_descriptor;
1082 const struct dml2_fams2_meta *stream_fams2_meta;
1083 unsigned int microschedule_vlines;
1084 unsigned int i;
1085 unsigned int mcaches_per_plane;
1086 unsigned int total_mcaches_required = 0;
1087
1088 unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
1089
	/* confirm the timing is not a centered timing */
1091 for (i = 0; i < display_config->display_config.num_planes; i++) {
1092 plane_descriptor = &display_config->display_config.plane_descriptors[i];
1093 mcaches_per_plane = 0;
1094
1095 if (plane_descriptor->surface.dcc.enable) {
1096 mcaches_per_plane += display_config->stage2.mcache_allocations[i].num_mcaches_plane0 +
1097 display_config->stage2.mcache_allocations[i].num_mcaches_plane1 -
1098 (display_config->stage2.mcache_allocations[i].last_slice_sharing.plane0_plane1 ? 1 : 0);
1099 }
1100
1101 if (is_bit_set_in_bitfield(mask, (unsigned char)plane_descriptor->stream_index)) {
1102 num_planes_per_stream[plane_descriptor->stream_index]++;
1103
1104 /* check recout height covers entire otg vactive, and single plane */
1105 if (num_planes_per_stream[plane_descriptor->stream_index] > 1 ||
1106 !plane_descriptor->composition.rect_out_height_spans_vactive ||
1107 plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
1108 return false;
1109 }
1110
1111 /* phantom requires same number of mcaches as main */
1112 if (plane_descriptor->surface.dcc.enable) {
1113 mcaches_per_plane *= 2;
1114 }
1115 }
1116 total_mcaches_required += mcaches_per_plane;
1117 }
1118
1119 if (total_mcaches_required > pmo->soc_bb->num_dcc_mcaches) {
1120 /* too many mcaches required */
1121 return false;
1122 }
1123
1124 for (i = 0; i < DML2_MAX_PLANES; i++) {
1125 if (is_bit_set_in_bitfield(mask, i)) {
1126 stream_descriptor = &display_config->display_config.stream_descriptors[i];
1127 stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
1128
1129 if (stream_descriptor->overrides.disable_subvp) {
1130 return false;
1131 }
1132
1133 microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);
1134
1135 /* block if using an interlaced timing */
1136 if (stream_descriptor->timing.interlaced) {
1137 return false;
1138 }
1139
1140 /* 1) svp main stream's vactive must be able to fit the microschedule
1141 * 2) refresh rate must be within the allowed bounds
1142 */
1143 if (microschedule_vlines >= stream_descriptor->timing.v_active ||
1144 (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
1145 stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
1146 return false;
1147 }
1148 }
1149 }
1150
1151 return true;
1152 }
1153
static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
1155 {
1156 scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy;
1157 scratch->pmo_dcn4.num_pstate_candidates++;
1158 }
1159
static enum dml2_pstate_method uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)
1161 {
1162 enum dml2_pstate_method method = dml2_pstate_method_na;
1163
1164 switch (override_strategy) {
1165 case dml2_uclk_pstate_change_strategy_force_vactive:
1166 method = dml2_pstate_method_vactive;
1167 break;
1168 case dml2_uclk_pstate_change_strategy_force_vblank:
1169 method = dml2_pstate_method_vblank;
1170 break;
1171 case dml2_uclk_pstate_change_strategy_force_drr:
1172 method = dml2_pstate_method_fw_drr;
1173 break;
1174 case dml2_uclk_pstate_change_strategy_force_mall_svp:
1175 method = dml2_pstate_method_fw_svp;
1176 break;
1177 case dml2_uclk_pstate_change_strategy_force_mall_full_frame:
1178 case dml2_uclk_pstate_change_strategy_auto:
1179 default:
1180 method = dml2_pstate_method_na;
1181 }
1182
1183 return method;
1184 }
1185
static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)
1187 {
1188 enum dml2_uclk_pstate_change_strategy override_strategy = dml2_uclk_pstate_change_strategy_auto;
1189
1190 switch (method) {
1191 case dml2_pstate_method_vactive:
1192 case dml2_pstate_method_fw_vactive_drr:
1193 override_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
1194 break;
1195 case dml2_pstate_method_vblank:
1196 case dml2_pstate_method_fw_vblank_drr:
1197 override_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
1198 break;
1199 case dml2_pstate_method_fw_svp:
1200 case dml2_pstate_method_fw_svp_drr:
1201 override_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
1202 break;
1203 case dml2_pstate_method_fw_drr:
1204 override_strategy = dml2_uclk_pstate_change_strategy_force_drr;
1205 break;
1206 case dml2_pstate_method_reserved_hw:
1207 case dml2_pstate_method_reserved_fw:
1208 case dml2_pstate_method_reserved_fw_drr_clamped:
1209 case dml2_pstate_method_reserved_fw_drr_var:
1210 case dml2_pstate_method_count:
1211 case dml2_pstate_method_na:
1212 default:
1213 override_strategy = dml2_uclk_pstate_change_strategy_auto;
1214 }
1215
1216 return override_strategy;
1217 }
1218
static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
1220 {
1221 unsigned int i;
1222
1223 for (i = 0; i < DML2_MAX_PLANES; i++) {
1224 if (is_bit_set_in_bitfield(plane_mask, i)) {
1225 if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
1226 display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != pstate_method_to_uclk_pstate_strategy_override(method))
1227 return false;
1228 }
1229 }
1230
1231 return true;
1232 }
1233
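/*
 * Convert a method's allow window from OTG vlines to microseconds and derive
 * the disallow time. An allow window covering a full period (possible with
 * DRR) leaves no disallow time.
 */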
static void build_method_scheduling_params(
	struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
	struct dml2_fams2_meta *stream_fams2_meta)
1237 {
1238 stream_method_fams2_meta->allow_time_us =
1239 (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
1240 stream_fams2_meta->otg_vline_time_us;
1241 if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
1242 /* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/
1243 stream_method_fams2_meta->disallow_time_us = 0.0;
1244 } else {
1245 stream_method_fams2_meta->disallow_time_us =
1246 stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
1247 }
1248 }
1249
static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
	struct dml2_pmo_instance *pmo,
	enum dml2_pstate_method stream_pstate_method,
	int stream_idx)
1254 {
1255 struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
1256
1257 switch (stream_pstate_method) {
1258 case dml2_pstate_method_vactive:
1259 case dml2_pstate_method_fw_vactive_drr:
1260 stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
1261 break;
1262 case dml2_pstate_method_vblank:
1263 case dml2_pstate_method_fw_vblank_drr:
1264 stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
1265 break;
1266 case dml2_pstate_method_fw_svp:
1267 case dml2_pstate_method_fw_svp_drr:
1268 stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
1269 break;
1270 case dml2_pstate_method_fw_drr:
1271 stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
1272 break;
1273 case dml2_pstate_method_reserved_hw:
1274 case dml2_pstate_method_reserved_fw:
1275 case dml2_pstate_method_reserved_fw_drr_clamped:
1276 case dml2_pstate_method_reserved_fw_drr_var:
1277 case dml2_pstate_method_count:
1278 case dml2_pstate_method_na:
1279 default:
1280 stream_method_fams2_meta = NULL;
1281 }
1282
1283 return stream_method_fams2_meta;
1284 }
1285
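/*
 * Compute the common allow window of a synchronized timing group by
 * intersecting the allow windows of its member streams, then check that the
 * window is non-empty and the disallow time fits the FAMS2 allow delay limit.
 */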
static bool is_timing_group_schedulable(
	struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_cfg,
	const struct dml2_pmo_pstate_strategy *pstate_strategy,
	const unsigned int timing_group_idx,
	struct dml2_fams2_per_method_common_meta *group_fams2_meta)
1292 {
1293 unsigned int i;
1294 struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;
1295
1296 unsigned int base_stream_idx = 0;
1297 struct dml2_pmo_scratch *s = &pmo->scratch;
1298
1299 /* find base stream idx */
1300 for (base_stream_idx = 0; base_stream_idx < display_cfg->display_config.num_streams; base_stream_idx++) {
1301 if (is_bit_set_in_bitfield(s->pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], base_stream_idx)) {
1302 /* master stream found */
1303 break;
1304 }
1305 }
1306
1307 /* init allow start and end lines for timing group */
1308 stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
1309 if (!stream_method_fams2_meta)
1310 return false;
1311
1312 group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
1313 group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
1314 group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
1315 for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
1316 if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
1317 stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
1318 if (!stream_method_fams2_meta)
1319 continue;
1320
1321 if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
1322 /* set group allow start to larger otg vline */
1323 group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
1324 }
1325
1326 if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
1327 /* set group allow end to smaller otg vline */
1328 group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
1329 }
1330
1331 /* check waveform still has positive width */
1332 if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
1333 /* timing group is not schedulable */
1334 return false;
1335 }
1336 }
1337 }
1338
1339 /* calculate the rest of the meta */
1340 build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);
1341
1342 return group_fams2_meta->allow_time_us > 0.0 &&
1343 group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
1344 }
1345
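/*
 * Check whether a candidate p-state strategy yields a schedulable FAMS2
 * configuration. The checks are staged: per-group allow windows, worst case
 * overlap of disallow regions, whether the larger allow windows cover the
 * other groups' periods, and HW exclusive mode alignment for two groups.
 */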
static bool is_config_schedulable(
	struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_cfg,
	const struct dml2_pmo_pstate_strategy *pstate_strategy)
1350 {
1351 unsigned int i, j;
1352 bool schedulable;
1353 struct dml2_pmo_scratch *s = &pmo->scratch;
1354
1355 double max_allow_delay_us = 0.0;
1356
1357 memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
1358 memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);
1359
1360 /* search for a general solution to the schedule */
1361
1362 /* STAGE 0: Early return for special cases */
1363 if (display_cfg->display_config.num_streams == 0) {
1364 return true;
1365 }
1366
1367 /* STAGE 1: confirm allow waves overlap for synchronizable streams */
1368 schedulable = true;
1369 for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
1370 s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
1371 s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
1372 if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
1373 /* synchronized timing group was not schedulable */
1374 schedulable = false;
1375 break;
1376 }
1377 max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
1378 }
1379
1380 if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
1381 /* 1. the only timing group was schedulable, so early pass
1382 * 2. one of the timing groups was not schedulable, so early fail */
1383 return schedulable;
1384 }
1385
1386 /* STAGE 2: Check allow can't be masked entirely by other disallows */
1387 schedulable = true;
1388
1389 /* sort disallow times from greatest to least */
1390 for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
1391 bool swapped = false;
1392
1393 for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
1394 double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
1395 double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
1396 if (j_disallow_us < jp1_disallow_us) {
1397 /* swap as A < B */
1398 swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
1399 s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
1400 swapped = true;
1401 }
1402 }
1403
1404 /* sorted, exit early */
1405 if (!swapped)
1406 break;
1407 }
1408
1409 /* Check the worst case: a disallow region lands in the middle of another
1410 * display's allow window, and with >2 streams each additional disallow halves
1411 * the remaining allow time. */
1412 for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
1413 if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
1414 /* this timing group always allows */
1415 continue;
1416 }
1417
1418 double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
1419 for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
1420 unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
1421 /* stream can't overlap itself */
1422 if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
1423 max_allow_time_us = math_min2(
1424 s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
1425 (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
1426
1427 if (max_allow_time_us < 0.0) {
1428 /* failed, exit early */
1429 break;
1430 }
1431 }
1432 }
1433
1434 if (max_allow_time_us <= 0.0) {
1435 /* not enough time for microschedule in the worst case */
1436 schedulable = false;
1437 break;
1438 }
1439 }
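/* Illustrative sketch (assumed numbers): group i allows for 500us; two other groups
 * each disallow for 100us and allow for 600us. Walking them in descending disallow
 * order gives min(600, (500 - 100) / 2) = 200us, then min(600, (200 - 100) / 2) = 50us.
 * The worst-case remaining allow is still positive, so STAGE 2 passes for group i.
 */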
1440
1441 if (schedulable && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
1442 return true;
1443 }
1444
1445 /* STAGE 3: check larger allow can fit period of all other streams */
1446 schedulable = true;
1447
1448 /* sort periods from greatest to least */
1449 for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
1450 bool swapped = false;
1451
1452 for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
1453 double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
1454 double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
1455 if (j_period_us < jp1_period_us) {
1456 /* swap as A < B */
1457 swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
1458 s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
1459 swapped = true;
1460 }
1461 }
1462
1463 /* sorted, exit early */
1464 if (!swapped)
1465 break;
1466 }
1467
1468 /* check larger allow can fit period of all other streams */
1469 for (i = 0; i < s->pmo_dcn4.num_timing_groups - 1; i++) {
1470 unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
1471 unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
1472
1473 if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
1474 (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
1475 schedulable = false;
1476 break;
1477 }
1478 }
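/* Illustrative sketch (assumed rates): with a 60Hz group (period ~16.7ms) and a
 * 120Hz group (period ~8.3ms), the longer-period group must expose at least 8.3ms
 * of contiguous allow so the shorter stream can always fit one full period inside
 * it, and the shorter-period group must not have DRR enabled and actively engaged.
 */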
1479
1480 if (schedulable && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
1481 return true;
1482 }
1483
1484 /* STAGE 4: When using HW exclusive modes, check disallow alignments are within allowed threshold */
1485 if (s->pmo_dcn4.num_timing_groups == 2 &&
1486 !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[0]) &&
1487 !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[1])) {
1488 double period_ratio;
1489 double max_shift_us;
1490 double shift_per_period;
1491
1492 /* default period_0 > period_1 */
1493 unsigned int lrg_idx = 0;
1494 unsigned int sml_idx = 1;
1495 if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
1496 /* period_0 < period_1 */
1497 lrg_idx = 1;
1498 sml_idx = 0;
1499 }
1500 period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
1501 shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
1502 max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
1503 max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
1504
1505 if (shift_per_period > 0.0 &&
1506 shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
1507 max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
1508 schedulable = true;
1509 }
1510 }
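/* Illustrative sketch (assumed rates): for a 60Hz/90Hz pair the period ratio is 1.5,
 * so the shorter stream's windows drift by 0.5 * its period (~5.6ms) against the
 * longer stream every long-period frame. The pair is only accepted if that per-frame
 * drift is smaller than the two allow windows combined and the time needed to drift
 * back into alignment (max_shift_us / shift_per_period * long period) stays under
 * the FW allow-delay cap.
 */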
1511
1512 return schedulable;
1513 }
1514
1515 static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
1516 const struct display_configuation_with_meta *display_cfg,
1517 const enum dml2_pstate_method stream_pstate_method,
1518 unsigned int stream_index)
1519 {
1520 const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
1521 bool strategy_matches_drr_requirements = true;
1522
1523 /* check if strategy is compatible with stream drr capability and strategy */
1524 if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
1525 display_cfg->display_config.num_streams > 1 &&
1526 stream_descriptor->timing.drr_config.enabled &&
1527 (stream_descriptor->timing.drr_config.drr_active_fixed || stream_descriptor->timing.drr_config.drr_active_variable)) {
1528 /* DRR is active, so config may become unschedulable */
1529 strategy_matches_drr_requirements = false;
1530 } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
1531 is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
1532 stream_descriptor->timing.drr_config.enabled &&
1533 stream_descriptor->timing.drr_config.drr_active_variable) {
1534 /* DRR is variable, fw exclusive methods require DRR to be clamped */
1535 strategy_matches_drr_requirements = false;
1536 } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
1537 pmo->options->disable_drr_var_when_var_active &&
1538 stream_descriptor->timing.drr_config.enabled &&
1539 stream_descriptor->timing.drr_config.drr_active_variable) {
1540 /* DRR variable is active, but policy blocks DRR for p-state when this happens */
1541 strategy_matches_drr_requirements = false;
1542 } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
1543 (pmo->options->disable_drr_var ||
1544 !stream_descriptor->timing.drr_config.enabled ||
1545 stream_descriptor->timing.drr_config.disallowed)) {
1546 /* DRR variable strategies are disallowed due to settings or policy */
1547 strategy_matches_drr_requirements = false;
1548 } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_method) &&
1549 (pmo->options->disable_drr_clamped ||
1550 (!stream_descriptor->timing.drr_config.enabled ||
1551 (!stream_descriptor->timing.drr_config.drr_active_fixed && !stream_descriptor->timing.drr_config.drr_active_variable)) ||
1552 (pmo->options->disable_drr_clamped_when_var_active &&
1553 stream_descriptor->timing.drr_config.enabled &&
1554 stream_descriptor->timing.drr_config.drr_active_variable))) {
1555 /* DRR fixed strategies are disallowed due to settings or policy */
1556 strategy_matches_drr_requirements = false;
1557 } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
1558 pmo->options->disable_fams2) {
1559 /* FW modes require FAMS2 */
1560 strategy_matches_drr_requirements = false;
1561 }
1562
1563 return strategy_matches_drr_requirements;
1564 }
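/* Illustrative reading (policy masks as named above, behavior unchanged): a
 * no-DRR-style method on a multi-stream config is rejected when the stream is
 * actively running DRR (fixed or variable), and a DRR-variable method is rejected
 * when DRR is disabled by option, not enabled on the timing, or disallowed for it.
 */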
1565
1566 static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
1567 const struct display_configuation_with_meta *display_cfg,
1568 const struct dml2_pmo_pstate_strategy *pstate_strategy)
1569 {
1570 struct dml2_pmo_scratch *s = &pmo->scratch;
1571
1572 unsigned int stream_index = 0;
1573
1574 unsigned int svp_count = 0;
1575 unsigned int svp_stream_mask = 0;
1576 unsigned int drr_count = 0;
1577 unsigned int drr_stream_mask = 0;
1578 unsigned int vactive_count = 0;
1579 unsigned int vactive_stream_mask = 0;
1580 unsigned int vblank_count = 0;
1581 unsigned int vblank_stream_mask = 0;
1582
1583 bool strategy_matches_forced_requirements = true;
1584 bool strategy_matches_drr_requirements = true;
1585
1586 // Tabulate everything
1587 for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
1588
1589 if (!all_planes_match_method(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
1590 pstate_strategy->per_stream_pstate_method[stream_index])) {
1591 strategy_matches_forced_requirements = false;
1592 break;
1593 }
1594
1595 strategy_matches_drr_requirements &=
1596 stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
1597
1598 if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
1599 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
1600 svp_count++;
1601 set_bit_in_bitfield(&svp_stream_mask, stream_index);
1602 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
1603 drr_count++;
1604 set_bit_in_bitfield(&drr_stream_mask, stream_index);
1605 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
1606 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
1607 vactive_count++;
1608 set_bit_in_bitfield(&vactive_stream_mask, stream_index);
1609 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
1610 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
1611 vblank_count++;
1612 set_bit_in_bitfield(&vblank_stream_mask, stream_index);
1613 }
1614 }
1615
1616 if (!strategy_matches_forced_requirements || !strategy_matches_drr_requirements)
1617 return false;
1618
1619 if (vactive_count > 0 && !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask))
1620 return false;
1621
1622 if (vblank_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vblank(pmo, display_cfg, vblank_stream_mask)))
1623 return false;
1624
1625 if (drr_count > 0 && (pmo->options->disable_drr_var || !all_timings_support_drr(pmo, display_cfg, drr_stream_mask)))
1626 return false;
1627
1628 if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
1629 return false;
1630
1631 return is_config_schedulable(pmo, display_cfg, pstate_strategy);
1632 }
1633
1634 static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
1635 {
1636 unsigned int i;
1637 int min_vactive_margin_us = 0xFFFFFFF;
1638
1639 for (i = 0; i < DML2_MAX_PLANES; i++) {
1640 if (is_bit_set_in_bitfield(plane_mask, i)) {
1641 if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active < min_vactive_margin_us)
1642 min_vactive_margin_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active;
1643 }
1644 }
1645
1646 return min_vactive_margin_us;
1647 }
1648
1649 static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
1650 {
1651 unsigned char i;
1652 unsigned int max_vactive_fill_us = 0;
1653
1654 for (i = 0; i < DML2_MAX_PLANES; i++) {
1655 if (is_bit_set_in_bitfield(plane_mask, i)) {
1656 if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us)
1657 max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us;
1658 }
1659 }
1660
1661 return max_vactive_fill_us;
1662 }
1663
1664 static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
1665 struct display_configuation_with_meta *display_config,
1666 int stream_index)
1667 {
1668 const struct dml2_ip_capabilities *ip_caps = pmo->ip_caps;
1669 const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
1670 const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
1671 const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
1672 struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
1673
1674 /* worst case all other streams require some programming at the same time, 0 if only 1 stream */
1675 unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
1676 (unsigned int)math_max3(ip_caps->fams2.subvp_programming_delay_us, ip_caps->fams2.drr_programming_delay_us, ip_caps->fams2.allow_programming_delay_us)) *
1677 (display_config->display_config.num_streams - 1);
1678
1679 /* common */
1680 stream_fams2_meta->valid = true;
1681 stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
1682 stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
1683 stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
1684 (stream_fams2_meta->nom_vtotal * timing->h_total);
1685 stream_fams2_meta->nom_frame_time_us =
1686 (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
1687 stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
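/* Illustrative sketch (assumed CTA-861 1080p60 timing): h_total = 2200 and
 * pixel_clock_khz = 148500 give otg_vline_time_us = 2200 / 148500 * 1000 ~= 14.81us;
 * with v_active = 1080 and vblank_nom = 45, nom_vtotal = 1125, nom_refresh_rate_hz
 * ~= 148500000 / (1125 * 2200) = 60.0 and nom_frame_time_us ~= 16667.
 */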
1688
1689 if (stream_descriptor->timing.drr_config.enabled == true) {
1690 if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
1691 stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
1692 ((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
1693 } else {
1694 /* assume min of 48Hz */
1695 stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
1696 (48000000.0 * stream_descriptor->timing.h_total) * 1e9);
1697 }
1698 } else {
1699 stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
1700 }
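/* Illustrative sketch (assumed numbers): with pixel_clock_khz = 148500, h_total = 2200
 * and min_refresh_uhz = 48000000 (48Hz), max_vtotal = floor(148500 / (48000000 * 2200)
 * * 1e9) = 1406, i.e. the frame can stretch from 1125 up to 1406 lines before dropping
 * below the 48Hz floor.
 */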
1701 stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
1702 (stream_fams2_meta->max_vtotal * timing->h_total);
1703 stream_fams2_meta->max_frame_time_us =
1704 (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
1705
1706 stream_fams2_meta->scheduling_delay_otg_vlines =
1707 (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
1708 stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
1709 (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
1710 stream_fams2_meta->contention_delay_otg_vlines =
1711 (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
1712 /* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
1713 stream_fams2_meta->allow_to_target_delay_otg_vlines =
1714 (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
1715 stream_fams2_meta->min_allow_width_otg_vlines =
1716 (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
1717 /* this value should account for urgent latency */
1718 stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
1719 (unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
1720 stream_fams2_meta->otg_vline_time_us);
1721
1722 /* scheduling params should be built based on the worst case for allow_time:disallow_time */
1723
1724 /* vactive */
1725 if (display_config->display_config.num_streams == 1) {
1726 /* for single stream, guarantee at least an instant of allow */
1727 stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
1728 math_max2(0.0,
1729 timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
1730 } else {
1731 /* for multi stream, bound to a max fill time defined by IP caps */
1732 stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
1733 (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
1734 }
1735 stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
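/* Illustrative sketch (assumed numbers): a single stream with v_active = 1080,
 * min_allow_width = 7 vlines and a 27-vline DRAM blackout leaves up to
 * 1080 - 7 - 27 = 1046 vlines of DET fill; with multiple streams and an assumed
 * IP cap of 350us at ~14.81us per vline, the bound is floor(350 / 14.81) = 23 vlines.
 */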
1736
1737 if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
1738 stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
1739 timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
1740 stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
1741 stream_fams2_meta->vblank_start -
1742 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1743 } else {
1744 stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
1745 stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
1746 }
1747 stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
1748 build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
1749
1750 /* vblank */
1751 stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
1752 stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
1753 stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
1754 stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
1755 build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
1756
1757 /* subvp */
1758 stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
1759 (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
1760 stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
1761 (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
1762 stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
1763 (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
1764 stream_fams2_meta->method_subvp.phantom_vactive =
1765 stream_fams2_meta->allow_to_target_delay_otg_vlines +
1766 stream_fams2_meta->min_allow_width_otg_vlines +
1767 stream_info->phantom_min_v_active;
1768 stream_fams2_meta->method_subvp.phantom_vfp =
1769 stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
1770 /* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/
1771 stream_fams2_meta->method_subvp.phantom_vtotal =
1772 stream_info->phantom_v_startup +
1773 stream_fams2_meta->method_subvp.phantom_vfp +
1774 1 +
1775 stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
1776 stream_fams2_meta->method_subvp.phantom_vactive;
1777 stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
1778 stream_descriptor->timing.v_blank_end +
1779 stream_fams2_meta->contention_delay_otg_vlines +
1780 stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
1781 stream_fams2_meta->method_subvp.phantom_vtotal +
1782 stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
1783 stream_fams2_meta->allow_to_target_delay_otg_vlines;
1784 stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
1785 stream_fams2_meta->vblank_start -
1786 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1787 stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
1788 build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
1789
1790 /* drr */
1791 stream_fams2_meta->method_drr.programming_delay_otg_vlines =
1792 (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
1793 stream_fams2_meta->method_drr.common.allow_start_otg_vline =
1794 stream_fams2_meta->vblank_start +
1795 stream_fams2_meta->allow_to_target_delay_otg_vlines;
1796 stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
1797 if (display_config->display_config.num_streams <= 1) {
1798 /* only need to stretch vblank for blackout time */
1799 stream_fams2_meta->method_drr.stretched_vtotal =
1800 stream_fams2_meta->nom_vtotal +
1801 stream_fams2_meta->allow_to_target_delay_otg_vlines +
1802 stream_fams2_meta->min_allow_width_otg_vlines +
1803 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1804 } else {
1805 /* multi display needs to always be schedulable */
1806 stream_fams2_meta->method_drr.stretched_vtotal =
1807 stream_fams2_meta->nom_vtotal * 2 +
1808 stream_fams2_meta->allow_to_target_delay_otg_vlines +
1809 stream_fams2_meta->min_allow_width_otg_vlines +
1810 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1811 }
1812 stream_fams2_meta->method_drr.common.allow_end_otg_vline =
1813 stream_fams2_meta->method_drr.stretched_vtotal -
1814 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1815 build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
1816 }
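/* Illustrative sketch (assumed numbers): for the 1125-line nominal frame above with
 * allow_to_target = 4, min_allow_width = 7 and blackout = 27 vlines, the single-stream
 * DRR stretch is 1125 + 4 + 7 + 27 = 1163 lines, while the multi-stream case stretches
 * to 2 * 1125 + 4 + 7 + 27 = 2288 lines so every other stream's period can fit inside
 * the allow region.
 */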
1817
1818 static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
1819 struct display_configuation_with_meta *display_config,
1820 int stream_index)
1821 {
1822 struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
1823 struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
1824
1825 stream_svp_meta->valid = true;
1826
1827 /* PMO FAMS2 precalculates these values */
1828 stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
1829 stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
1830 stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
1831 }
1832
1833 bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
1834 {
1835 struct dml2_pmo_instance *pmo = in_out->instance;
1836 struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3;
1837 struct dml2_pmo_scratch *s = &pmo->scratch;
1838
1839 struct display_configuation_with_meta *display_config;
1840 const struct dml2_plane_parameters *plane_descriptor;
1841 const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
1842 struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
1843 unsigned int strategy_list_size = 0;
1844 unsigned int plane_index, stream_index, i;
1845 bool build_override_strategy = true;
1846
1847 state->performed = true;
1848 in_out->base_display_config->stage3.min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;
1849
1850 display_config = in_out->base_display_config;
1851 display_config->display_config.overrides.enable_subvp_implicit_pmo = true;
1852
1853 memset(s, 0, sizeof(struct dml2_pmo_scratch));
1854
1855 if (display_config->display_config.overrides.all_streams_blanked) {
1856 return true;
1857 }
1858
1859 pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
1860 pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size;
1861 pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
1862
1863 // First build the stream plane mask (array of bitfields indexed by stream, indicating plane mapping)
1864 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1865 plane_descriptor = &display_config->display_config.plane_descriptors[plane_index];
1866
1867 set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
1868
1869 state->pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
1870
1871 build_override_strategy &= plane_descriptor->overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto;
1872 override_base_strategy.per_stream_pstate_method[plane_descriptor->stream_index] =
1873 uclk_pstate_strategy_override_to_pstate_method(plane_descriptor->overrides.uclk_pstate_change_strategy);
1874 }
1875
1876 // Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
1877 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
1878 if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us))
1879 set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
1880
1881 /* FAMS2 meta */
1882 build_fams2_meta_per_stream(pmo, display_config, stream_index);
1883
1884 /* SVP meta */
1885 build_subvp_meta_per_stream(pmo, display_config, stream_index);
1886 }
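/* Illustrative sketch (assumed blackout): with MIN_VACTIVE_MARGIN_PCT = 0.25 and a
 * dram_clk_change_blackout_us of 400us, a stream is marked vactive-capable only when
 * every plane on it keeps at least 100us of latency-hiding margin in active.
 */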
1887
1888 /* get synchronized timing groups */
1889 build_synchronized_timing_groups(pmo, display_config);
1890
1891 if (build_override_strategy) {
1892 /* build expanded override strategy list (no permutations) */
1893 override_base_strategy.allow_state_increase = true;
1894 s->pmo_dcn4.num_expanded_override_strategies = 0;
1895 insert_strategy_into_expanded_list(&override_base_strategy,
1896 display_config->display_config.num_streams,
1897 s->pmo_dcn4.expanded_override_strategy_list,
1898 &s->pmo_dcn4.num_expanded_override_strategies);
1899 expand_variant_strategy(&override_base_strategy,
1900 display_config->display_config.num_streams,
1901 false,
1902 s->pmo_dcn4.expanded_override_strategy_list,
1903 &s->pmo_dcn4.num_expanded_override_strategies);
1904
1905 /* use override strategy list */
1906 strategy_list = s->pmo_dcn4.expanded_override_strategy_list;
1907 strategy_list_size = s->pmo_dcn4.num_expanded_override_strategies;
1908 } else {
1909 /* use predefined strategy list */
1910 strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
1911 strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
1912 }
1913
1914 if (!strategy_list || strategy_list_size == 0)
1915 return false;
1916
1917 s->pmo_dcn4.num_pstate_candidates = 0;
1918
1919 for (i = 0; i < strategy_list_size && s->pmo_dcn4.num_pstate_candidates < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
1920 if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, &strategy_list[i])) {
1921 insert_into_candidate_list(&strategy_list[i], display_config->display_config.num_streams, s);
1922 }
1923 }
1924
1925 if (s->pmo_dcn4.num_pstate_candidates > 0) {
1926 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates-1].allow_state_increase = true;
1927 s->pmo_dcn4.cur_pstate_candidate = -1;
1928 return true;
1929 } else {
1930 return false;
1931 }
1932 }
1933
1934 static void reset_display_configuration(struct display_configuation_with_meta *display_config)
1935 {
1936 unsigned int plane_index;
1937 unsigned int stream_index;
1938 struct dml2_plane_parameters *plane;
1939
1940 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
1941 display_config->stage3.stream_svp_meta[stream_index].valid = false;
1942
1943 display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false;
1944 display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0;
1945 }
1946
1947 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1948 plane = &display_config->display_config.plane_descriptors[plane_index];
1949
1950 // Unset SubVP
1951 plane->overrides.legacy_svp_config = dml2_svp_mode_override_auto;
1952
1953 // Remove reserve time
1954 plane->overrides.reserved_vblank_time_ns = 0;
1955
1956 // Reset strategy to auto
1957 plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
1958
1959 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_na;
1960 }
1961 }
1962
1963 static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config,
1964 struct dml2_pmo_instance *pmo,
1965 int plane_mask)
1966 {
1967 unsigned int plane_index;
1968 struct dml2_plane_parameters *plane;
1969
1970 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1971 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
1972 plane = &display_config->display_config.plane_descriptors[plane_index];
1973
1974 plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
1975
1976 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
1977
1978 }
1979 }
1980 }
1981
1982 static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *display_config,
1983 struct dml2_pmo_instance *pmo,
1984 int plane_mask)
1985 {
1986 struct dml2_pmo_scratch *scratch = &pmo->scratch;
1987
1988 unsigned int plane_index;
1989 int stream_index = -1;
1990
1991 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1992 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
1993 stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
1994 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp;
1995 }
1996 }
1997
1998 if (stream_index >= 0) {
1999 memcpy(&display_config->stage3.stream_svp_meta[stream_index],
2000 &scratch->pmo_dcn4.stream_svp_meta[stream_index],
2001 sizeof(struct dml2_implicit_svp_meta));
2002 }
2003 }
2004
2005 static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_meta *display_config,
2006 struct dml2_pmo_instance *pmo,
2007 int plane_mask)
2008 {
2009 struct dml2_pmo_scratch *scratch = &pmo->scratch;
2010
2011 unsigned int plane_index;
2012 int stream_index = -1;
2013
2014 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2015 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2016 stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
2017 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp_drr;
2018 }
2019 }
2020
2021 if (stream_index >= 0) {
2022 memcpy(&display_config->stage3.stream_svp_meta[stream_index],
2023 &scratch->pmo_dcn4.stream_svp_meta[stream_index],
2024 sizeof(struct dml2_implicit_svp_meta));
2025 }
2026 }
2027
2028 static void setup_planes_for_vblank_by_mask(struct display_configuation_with_meta *display_config,
2029 struct dml2_pmo_instance *pmo,
2030 int plane_mask)
2031 {
2032 unsigned int plane_index;
2033 struct dml2_plane_parameters *plane;
2034
2035 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2036 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2037 plane = &display_config->display_config.plane_descriptors[plane_index];
2038
2039 plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
2040 plane->overrides.reserved_vblank_time_ns);
2041
2042 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
2043
2044 }
2045 }
2046 }
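/* Illustrative sketch (assumed blackout): with a 400us dram_clk_change_blackout_us,
 * each plane in the mask gets reserved_vblank_time_ns raised to at least 400000ns,
 * keeping any larger value already requested by an override.
 */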
2047
2048 static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with_meta *display_config,
2049 struct dml2_pmo_instance *pmo,
2050 int plane_mask)
2051 {
2052 unsigned int plane_index;
2053 struct dml2_plane_parameters *plane;
2054
2055 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2056 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2057 plane = &display_config->display_config.plane_descriptors[plane_index];
2058 plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
2059
2060 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
2061 }
2062 }
2063 }
2064
2065 static void setup_planes_for_vactive_by_mask(struct display_configuation_with_meta *display_config,
2066 struct dml2_pmo_instance *pmo,
2067 int plane_mask)
2068 {
2069 unsigned int plane_index;
2070 unsigned int stream_index;
2071
2072 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2073 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2074 stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
2075
2076 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
2077
2078 if (!pmo->options->disable_vactive_det_fill_bw_pad) {
2079 display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
2080 (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
2081 }
2082 }
2083 }
2084 }
2085
2086 static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_with_meta *display_config,
2087 struct dml2_pmo_instance *pmo,
2088 int plane_mask)
2089 {
2090 unsigned int plane_index;
2091 unsigned int stream_index;
2092
2093 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2094 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2095 stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
2096
2097 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
2098
2099 if (!pmo->options->disable_vactive_det_fill_bw_pad) {
2100 display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
2101 (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
2102 }
2103 }
2104 }
2105 }
2106
2107 static bool setup_display_config(struct display_configuation_with_meta *display_config, struct dml2_pmo_instance *pmo, int strategy_index)
2108 {
2109 struct dml2_pmo_scratch *scratch = &pmo->scratch;
2110
2111 bool fams2_required = false;
2112 bool success = true;
2113 unsigned int stream_index;
2114
2115 reset_display_configuration(display_config);
2116
2117 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
2118
2119 if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
2120 success = false;
2121 break;
2122 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive) {
2123 setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2124 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
2125 setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2126 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp) {
2127 fams2_required = true;
2128 setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2129 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
2130 fams2_required = true;
2131 setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2132 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
2133 fams2_required = true;
2134 setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2135 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
2136 fams2_required = true;
2137 setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2138 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
2139 fams2_required = true;
2140 setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2141 }
2142 }
2143
2144 /* copy FAMS2 meta */
2145 if (success) {
2146 display_config->stage3.fams2_required = fams2_required;
2147 memcpy(&display_config->stage3.stream_fams2_meta,
2148 &scratch->pmo_dcn4.stream_fams2_meta,
2149 sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
2150 }
2151
2152 return success;
2153 }
2154
2155 static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
2156 {
2157 int min_time_us = 0xFFFFFF;
2158 unsigned int plane_index = 0;
2159
2160 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2161 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2162 if (min_time_us > (display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000))
2163 min_time_us = display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
2164 }
2165 }
2166 return min_time_us;
2167 }
2168
2169 bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
2170 {
2171 bool p_state_supported = true;
2172 unsigned int stream_index;
2173 struct dml2_pmo_scratch *s = &in_out->instance->scratch;
2174
2175 int MIN_VACTIVE_MARGIN_VBLANK = 0;
2176 int MIN_VACTIVE_MARGIN_DRR = 0;
2177 int REQUIRED_RESERVED_TIME = 0;
2178
2179 if (in_out->base_display_config->display_config.overrides.all_streams_blanked) {
2180 return true;
2181 }
2182
2183 MIN_VACTIVE_MARGIN_VBLANK = INT_MIN;
2184 MIN_VACTIVE_MARGIN_DRR = INT_MIN;
2185 REQUIRED_RESERVED_TIME = (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
2186
2187 if (s->pmo_dcn4.cur_pstate_candidate < 0)
2188 return false;
2189
2190 for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
2191 struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
2192
2193 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
2194 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
2195 if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
2196 get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
2197 p_state_supported = false;
2198 break;
2199 }
2200 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
2201 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
2202 if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
2203 REQUIRED_RESERVED_TIME ||
2204 get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
2205 p_state_supported = false;
2206 break;
2207 }
2208 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
2209 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
2210 if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
2211 p_state_supported = false;
2212 break;
2213 }
2214 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
2215 if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
2216 get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
2217 p_state_supported = false;
2218 break;
2219 }
2220 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
2221 p_state_supported = false;
2222 break;
2223 }
2224 }
2225
2226 return p_state_supported;
2227 }
2228
2229 bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out)
2230 {
2231 bool success = false;
2232 struct dml2_pmo_scratch *s = &in_out->instance->scratch;
2233
2234 memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
2235
2236 if (in_out->last_candidate_failed) {
2237 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].allow_state_increase &&
2238 s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index - 1) {
2239 s->pmo_dcn4.cur_latency_index++;
2240
2241 success = true;
2242 }
2243 }
2244
2245 if (!success) {
2246 s->pmo_dcn4.cur_latency_index = s->pmo_dcn4.min_latency_index;
2247 s->pmo_dcn4.cur_pstate_candidate++;
2248
2249 if (s->pmo_dcn4.cur_pstate_candidate < s->pmo_dcn4.num_pstate_candidates) {
2250 success = true;
2251 }
2252 }
2253
2254 if (success) {
2255 in_out->optimized_display_config->stage3.min_clk_index_for_latency = s->pmo_dcn4.cur_latency_index;
2256 setup_display_config(in_out->optimized_display_config, in_out->instance, in_out->instance->scratch.pmo_dcn4.cur_pstate_candidate);
2257 }
2258
2259 return success;
2260 }
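/* Illustrative walk of the search order (no new behavior implied): a failed candidate
 * is first retried at a higher latency clock index when its strategy allows state
 * increase; otherwise the optimizer resets to the minimum latency index and advances
 * to the next p-state strategy candidate until the candidate list is exhausted.
 */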
2261
2262 bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
2263 {
2264 bool success = true;
2265 struct dml2_pmo_instance *pmo = in_out->instance;
2266 bool stutter_period_meets_z8_eco = true;
2267 bool z8_stutter_optimization_too_expensive = false;
2268 bool stutter_optimization_too_expensive = false;
2269 double line_time_us, vblank_nom_time_us;
2270
2271 unsigned int i;
2272
2273 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
2274 pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
2275 pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us)
2276 return false; // Unexpected SoCBB setup
2277
2278 for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
2279 if (in_out->base_display_config->mode_support_result.cfg_support_info.plane_support_info[i].active_latency_hiding_us <
2280 pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us + pmo->soc_bb->power_management_parameters.z8_min_idle_time) {
2281 stutter_period_meets_z8_eco = false;
2282 break;
2283 }
2284 }
2285
2286 for (i = 0; i < in_out->base_display_config->display_config.num_streams; i++) {
2287 line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
2288 vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
2289
2290 if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
2291 z8_stutter_optimization_too_expensive = true;
2292 break;
2293 }
2294
2295 if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
2296 stutter_optimization_too_expensive = true;
2297 break;
2298 }
2299 }
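/* Illustrative sketch (assumed latencies): the nominal vblank must span at least
 * MIN_BLANK_STUTTER_FACTOR times the exit latency. With an assumed z8 exit latency of
 * 500us the threshold is 1500us; a 1080p60 timing with vblank_nom = 45 lines
 * (~45 * 14.81us ~= 667us) would therefore mark the Z8 optimization as too expensive.
 */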
2300
2301 pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
2302 pmo->scratch.pmo_dcn4.cur_stutter_candidate = 0;
2303
2304 if (stutter_period_meets_z8_eco && !z8_stutter_optimization_too_expensive) {
2305 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0) {
2306 pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us;
2307 pmo->scratch.pmo_dcn4.num_stutter_candidates++;
2308 pmo->scratch.pmo_dcn4.z8_vblank_optimizable = true;
2309 }
2310 } else {
2311 pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
2312 }
2313
2314 if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
2315 pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
2316 pmo->scratch.pmo_dcn4.num_stutter_candidates++;
2317 }
2318
2319 if (pmo->scratch.pmo_dcn4.num_stutter_candidates == 0)
2320 success = false;
2321
2322 return success;
2323 }
2324
2325 bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out)
2326 {
2327 bool success = true;
2328 struct dml2_pmo_instance *pmo = in_out->instance;
2329
2330 unsigned int i;
2331
2332 for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
2333 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
2334 pmo->scratch.pmo_dcn4.z8_vblank_optimizable &&
2335 in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * 1000) {
2336 success = false;
2337 break;
2338 }
2339 if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
2340 in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * 1000) {
2341 success = false;
2342 break;
2343 }
2344 }
2345
2346 return success;
2347 }
2348
2349 bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out)
2350 {
2351 bool success = false;
2352 struct dml2_pmo_instance *pmo = in_out->instance;
2353 unsigned int i;
2354
2355 memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
2356
2357 if (!in_out->last_candidate_failed) {
2358 if (pmo->scratch.pmo_dcn4.cur_stutter_candidate < pmo->scratch.pmo_dcn4.num_stutter_candidates) {
2359 for (i = 0; i < in_out->optimized_display_config->display_config.num_planes; i++) {
2360 /* take the max of the current and the optimal reserved time */
2361 in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns =
2362 (long)math_max2(pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate] * 1000,
2363 in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns);
2364 }
2365
2366 success = true;
2367 }
2368 }
2369
2370 return success;
2371 }
2372