1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 #include "dc_state_priv.h"
60
61 #define DC_LOGGER \
62 dc_logger
63 #define DC_LOGGER_INIT(logger) \
64 struct dal_logger *dc_logger = logger
65
66 #define CTX \
67 hws->ctx
68 #define REG(reg)\
69 hws->regs->reg
70
71 #undef FN
72 #define FN(reg_name, field_name) \
73 hws->shifts->field_name, hws->masks->field_name
74
75 /*print is 17 wide, first two characters are spaces*/
76 #define DTN_INFO_MICRO_SEC(ref_cycle) \
77 print_microsec(dc_ctx, log_ctx, ref_cycle)
78
79 #define GAMMA_HW_POINTS_NUM 256
80
81 #define PGFSM_POWER_ON 0
82 #define PGFSM_POWER_OFF 2
83
/*
 * Convert a DCHUB reference-clock cycle count to a fixed-point
 * microsecond value and print it to the DTN log (17 characters wide,
 * three fractional digits).  Used via the DTN_INFO_MICRO_SEC() macro.
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	static const unsigned int frac = 1000;
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	/* cycles / MHz == microseconds; scale by frac to keep 3 decimals */
	uint32_t scaled_us = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO(" %11d.%03d", scaled_us / frac, scaled_us % frac);
}
96
/*
 * Acquire or release the pipe control lock on every eligible pipe.
 *
 * Only top pipes are (un)locked, to avoid redundant operations on the
 * shared timing generator.  Pipes without a stream, pipes with no plane
 * in either the old or the new state, pipes whose TG is disabled, and
 * SubVP phantom pipes are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct timing_generator *tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
			continue;

		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
128
/*
 * Dump the MPC and DPP CRC result registers to the debug log.
 * Each register is read only when its offset is defined (non-zero)
 * for this ASIC's register map.
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
				REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
				REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
142
/*
 * Print the HUBBUB watermark sets (urgent, PTE/meta urgent, self-refresh
 * enter/exit, DRAM clock change) to the debug log, converted from DCHUB
 * reference-clock cycles to microseconds by DTN_INFO_MICRO_SEC().
 */
static void dcn10_log_hubbub_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	/* Zero first: wm_read_state may not fill every field on all ASICs. */
	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
			" sr_enter sr_exit dram_clk_change\n");

	/* Four hardware watermark sets are logged. */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
171
/*
 * Dump per-HUBP hardware state to the debug log: surface/viewport
 * configuration for unblanked pipes, followed by the RQ (request),
 * DLG (delay generator) and TTU (time-to-underflow) register groups
 * read back from each HUBP.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Latch current register values into hubp's cached state. */
		hubp->funcs->hubp_read_state(hubp);

		/* Only report pipes that are not blanked. */
		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* RQ: DML request-generation registers (luma and chroma planes). */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* DLG: DML delay-generator (latency hiding / prefetch) registers. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl rc_td_grp\n");

	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
	}

	/* TTU: QoS watermarks and per-request delivery timing. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
286
/*
 * Log DPP color pipeline state (IGAM/DGAM/RGAM LUT modes and, when the
 * DPP exposes a readback hook, the gamut remap matrix), then the MPCC
 * blend tree state and the DPP/MPC color capability flags.
 */
static void dcn10_log_color_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	bool is_gamut_remap_available = false;
	int i;

	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
		" GAMUT adjust "
		"C11 C12 C13 C14 "
		"C21 C22 C23 C24 "
		"C31 C32 C33 C34 \n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);
		/* Gamut remap readback is optional per DPP implementation. */
		if (dpp->funcs->dpp_get_gamut_remap) {
			dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
			is_gamut_remap_available = true;
		}

		if (!s.is_enabled)
			continue;

		/* Decode the LUT mode enums into human-readable names. */
		DTN_INFO("[%2d]: %11xh %11s %9s %9s",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								"Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								"Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								"Unknown")))));
		if (is_gamut_remap_available)
			DTN_INFO(" %12s "
				"%010lld %010lld %010lld %010lld "
				"%010lld %010lld %010lld %010lld "
				"%010lld %010lld %010lld %010lld",
				(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
				s.gamut_remap.temperature_matrix[0].value,
				s.gamut_remap.temperature_matrix[1].value,
				s.gamut_remap.temperature_matrix[2].value,
				s.gamut_remap.temperature_matrix[3].value,
				s.gamut_remap.temperature_matrix[4].value,
				s.gamut_remap.temperature_matrix[5].value,
				s.gamut_remap.temperature_matrix[6].value,
				s.gamut_remap.temperature_matrix[7].value,
				s.gamut_remap.temperature_matrix[8].value,
				s.gamut_remap.temperature_matrix[9].value,
				s.gamut_remap.temperature_matrix[10].value,
				s.gamut_remap.temperature_matrix[11].value);

		DTN_INFO("\n");
	}
	DTN_INFO("\n");
	DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
		 " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
		 " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
		 " blnd_lut:%d oscs:%d\n\n",
		 dc->caps.color.dpp.input_lut_shared,
		 dc->caps.color.dpp.icsc,
		 dc->caps.color.dpp.dgam_ram,
		 dc->caps.color.dpp.dgam_rom_caps.srgb,
		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
		 dc->caps.color.dpp.dgam_rom_caps.pq,
		 dc->caps.color.dpp.dgam_rom_caps.hlg,
		 dc->caps.color.dpp.post_csc,
		 dc->caps.color.dpp.gamma_corr,
		 dc->caps.color.dpp.dgam_rom_for_yuv,
		 dc->caps.color.dpp.hw_3d_lut,
		 dc->caps.color.dpp.ogam_ram,
		 dc->caps.color.dpp.ocsc);

	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->mpcc_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is not attached to any OPP. */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");
	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
		 dc->caps.color.mpc.gamut_remap,
		 dc->caps.color.mpc.num_3dluts,
		 dc->caps.color.mpc.ogam_ram,
		 dc->caps.color.mpc.ocsc);
}
393
/*
 * dcn10_log_hw_state - dump a full snapshot of DCN hardware state.
 *
 * Logs, in order: HUBBUB watermarks, HUBP state, color pipeline state
 * (ASIC-specific hook when available), OTG timing, DSC, stream/link
 * encoders, calculated clocks, MPC CRCs, and DP HPO (128b/132b) stream
 * and link encoders.
 *
 * Side effect: clears the OTG underflow sticky bit for each enabled OTG
 * (see the comment in the OTG loop below).
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* Prefer the ASIC-specific color logger; fall back to DCN10's. */
	if (dc->hwss.log_color_state)
		dc->hwss.log_color_state(dc, log_ctx);
	else
		dcn10_log_color_state(dc, log_ctx);

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		if (tg->funcs->read_otg_state)
			tg->funcs->read_otg_state(tg, &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* State readback is optional per stream-encoder implementation. */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		/* link_enc may be NULL (e.g. unassigned dynamic encoder). */
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	/* DP HPO (128b/132b) encoder state, on pools that have them. */
	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
596
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)597 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
598 {
599 struct hubp *hubp = pipe_ctx->plane_res.hubp;
600 struct timing_generator *tg = pipe_ctx->stream_res.tg;
601
602 if (tg->funcs->is_optc_underflow_occurred(tg)) {
603 tg->funcs->clear_optc_underflow(tg);
604 return true;
605 }
606
607 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
608 hubp->funcs->hubp_clear_underflow(hubp);
609 return true;
610 }
611 return false;
612 }
613
/*
 * Enable or disable power gating for all HUBP and DPP power domains.
 *
 * When @enable is false every domain is forced on (POWER_FORCEON = 1),
 * which prevents the PGFSM from gating it; when true the force-on
 * override is released so gating may occur.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	/* Forcing a domain on is how power gating is disabled. */
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
635
/*
 * Take the display out of legacy VGA mode.  If none of the four D*VGA
 * controllers is enabled this is a no-op; otherwise all four are
 * disabled and a VGA render test is kicked off so DCHUBP timing gets
 * updated correctly (see HW engineer's note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do if VGA mode is already off everywhere. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
668
669 /**
670 * dcn10_dpp_pg_control - DPP power gate control.
671 *
672 * @hws: dce_hwseq reference.
673 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP on (power gate released), false to power gate it off.
675 *
676 * Enable or disable power gate in the specific DPP instance.
677 */
void dcn10_dpp_pg_control(
	struct dce_hwseq *hws,
	unsigned int dpp_inst,
	bool power_on)
{
	/* power_on == true releases the gate (domain powered); false asserts it. */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	/* Bail out when this ASIC's register map defines no DPP domain regs. */
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* Odd-numbered domains (1/3/5/7) belong to the DPPs; after each gate
	 * update, wait for the PGFSM to report the requested power state
	 * (REG_WAIT args 1, 1000 — presumably poll interval and retry count;
	 * confirm against reg_helper.h).
	 */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
729
730 /**
731 * dcn10_hubp_pg_control - HUBP power gate control.
732 *
733 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP on (power gate released), false to power gate it off.
736 *
737 * Enable or disable power gate in the specific HUBP instance.
738 */
void dcn10_hubp_pg_control(
	struct dce_hwseq *hws,
	unsigned int hubp_inst,
	bool power_on)
{
	/* power_on == true releases the gate (domain powered); false asserts it. */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	/* Bail out when this ASIC's register map defines no HUBP domain regs. */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* Even-numbered domains (0/2/4/6) belong to the HUBPs; after each
	 * gate update, wait for the PGFSM to report the requested state.
	 */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
790
/*
 * Power up the front-end resources (DPP and HUBP) for @plane_id:
 * first enable the DPP root clock, then — on ASICs that expose
 * DC_IP_REQUEST_CNTL — open the IP request window and ungate the DPP
 * and HUBP power domains via the hwseq hooks.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the IP request window before touching PG domains. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		/* Close the IP request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
816
/*
 * Revert the DEGVIDCN10_253 workaround (see apply_DEGVIDCN10_253_wa):
 * blank HUBP0 and power gate it back down.  No-op when the workaround
 * was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* Open the IP request window, gate HUBP0, close the window. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
836
/*
 * DEGVIDCN10_253 workaround: when every HUBP is power gated, stutter
 * cannot be enabled.  Once all pipes are gated, power HUBP0 back on
 * (kept blanked) so stutter can engage.  Skipped when stutter is
 * disabled via debug option or the workaround flag is not set for this
 * ASIC.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only applicable once every HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	/* Keep the revived HUBP blanked; it only exists to allow stutter. */
	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
866
/*
 * Run the VBIOS "golden" init sequence: globally initialize the display
 * engine through the BIOS command table, then disable each pipe.
 * Afterwards, restore DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE if
 * the command table flipped it (S0i3 workaround, see comment below).
 * Skipped entirely when the s0i3 golden-init workaround hook handles it.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Capture the pre-command-table self-refresh force-enable state. */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
906
false_optc_underflow_wa(struct dc * dc,const struct dc_stream_state * stream,struct timing_generator * tg)907 static void false_optc_underflow_wa(
908 struct dc *dc,
909 const struct dc_stream_state *stream,
910 struct timing_generator *tg)
911 {
912 int i;
913 bool underflow;
914
915 if (!dc->hwseq->wa.false_optc_underflow)
916 return;
917
918 underflow = tg->funcs->is_optc_underflow_occurred(tg);
919
920 for (i = 0; i < dc->res_pool->pipe_count; i++) {
921 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
922
923 if (old_pipe_ctx->stream != stream)
924 continue;
925
926 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
927 }
928
929 if (tg->funcs->set_blank_data_double_buffer)
930 tg->funcs->set_blank_data_double_buffer(tg, true);
931
932 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
933 tg->funcs->clear_optc_underflow(tg);
934 }
935
calculate_vready_offset_for_group(struct pipe_ctx * pipe)936 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
937 {
938 struct pipe_ctx *other_pipe;
939 int vready_offset = pipe->pipe_dlg_param.vready_offset;
940
941 /* Always use the largest vready_offset of all connected pipes */
942 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
943 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
944 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
945 }
946 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
947 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
948 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
949 }
950 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
951 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
952 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
953 }
954 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
955 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
956 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
957 }
958
959 return vready_offset;
960 }
961
/*
 * dcn10_enable_stream_timing - Program and enable the OTG for a stream.
 *
 * Enables the OPTC clock, programs the pixel clock and raw timing for the
 * parent pipe, sets the OTG blank color, blanks the OTG if needed, then
 * enables the CRTC.  Child pipes share the parent's back end and return
 * early with no programming.
 *
 * Return: DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock or
 * CRTC could not be enabled.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track OTG's symclk use so TMDS PHY clock state stays consistent. */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* Use the largest vready_offset across the whole pipe group. */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC and run the false-underflow
	 * WA while it is blanked.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1064
/*
 * dcn10_reset_back_end_for_pipe - Tear down the back end (link, audio,
 * OTG) owned by a pipe before it is reassigned or powered down.  The OTG
 * is only disabled when called for the parent pipe.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means the back end was never set up. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any pending DRR (variable refresh) adjustment. */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx actually lives in
	 * the current state.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1132
dcn10_hw_wa_force_recovery(struct dc * dc)1133 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1134 {
1135 struct hubp *hubp ;
1136 unsigned int i;
1137
1138 if (!dc->debug.recovery_enabled)
1139 return false;
1140 /*
1141 DCHUBP_CNTL:HUBP_BLANK_EN=1
1142 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1143 DCHUBP_CNTL:HUBP_DISABLE=1
1144 DCHUBP_CNTL:HUBP_DISABLE=0
1145 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1146 DCSURF_PRIMARY_SURFACE_ADDRESS
1147 DCHUBP_CNTL:HUBP_BLANK_EN=0
1148 */
1149
1150 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1151 struct pipe_ctx *pipe_ctx =
1152 &dc->current_state->res_ctx.pipe_ctx[i];
1153 if (pipe_ctx != NULL) {
1154 hubp = pipe_ctx->plane_res.hubp;
1155 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1156 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1157 hubp->funcs->set_hubp_blank_en(hubp, true);
1158 }
1159 }
1160 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1161 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1162
1163 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1164 struct pipe_ctx *pipe_ctx =
1165 &dc->current_state->res_ctx.pipe_ctx[i];
1166 if (pipe_ctx != NULL) {
1167 hubp = pipe_ctx->plane_res.hubp;
1168 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1169 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1170 hubp->funcs->hubp_disable_control(hubp, true);
1171 }
1172 }
1173 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1174 struct pipe_ctx *pipe_ctx =
1175 &dc->current_state->res_ctx.pipe_ctx[i];
1176 if (pipe_ctx != NULL) {
1177 hubp = pipe_ctx->plane_res.hubp;
1178 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1179 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1180 hubp->funcs->hubp_disable_control(hubp, true);
1181 }
1182 }
1183 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1184 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1185 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1186 struct pipe_ctx *pipe_ctx =
1187 &dc->current_state->res_ctx.pipe_ctx[i];
1188 if (pipe_ctx != NULL) {
1189 hubp = pipe_ctx->plane_res.hubp;
1190 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1191 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1192 hubp->funcs->set_hubp_blank_en(hubp, true);
1193 }
1194 }
1195 return true;
1196
1197 }
1198
/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that HUBBUB still
 * allows p-state change; on failure, optionally dump HW state, break to
 * debugger, attempt force recovery, and re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		/* Flip should_log_hw_state in a debugger to get a dump here. */
		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1222
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc,
				   struct dc_state *state,
				   struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	/* Find the MPCC that this pipe's DPP feeds in the OPP's blend tree. */
	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* Mark that an optimization pass is still required after this
	 * disconnect takes effect.
	 */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1257
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Reset block state while the blocks are powered down. */
		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Close the power-gate request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Gate the DPP root clock as well, where supported. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1300
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* The MPCC disconnect must complete before clocks can be dropped. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only once no MPCC remains attached to it.
	 * (0xf presumably means no OPP assigned — matches OPP_ID_INVALID use
	 * elsewhere in this file; confirm.)
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Scrub the pipe_ctx so the pipe reads as free. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1336
dcn10_disable_plane(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)1337 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1338 {
1339 struct dce_hwseq *hws = dc->hwseq;
1340 DC_LOGGER_INIT(dc->ctx->logger);
1341
1342 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1343 return;
1344
1345 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1346
1347 apply_DEGVIDCN10_253_wa(dc);
1348
1349 DC_LOG_DC("Power down front end %d\n",
1350 pipe_ctx->pipe_idx);
1351 }
1352
/*
 * dcn10_init_pipes - Bring every pipe to a known disabled state at init.
 *
 * Blanks and locks any enabled OTG, resets DET allocations and the MPC
 * mux, then disconnects and disables each front end — except for pipes
 * carrying a seamless-boot stream, which are left running.  Finally,
 * power gates any DSC not in use by the currently running OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Populate the pipe_ctx with default resource mapping so the
		 * disconnect/disable path below has something to operate on.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1542
/*
 * dcn10_init_hw - One-time hardware init at driver load / resume.
 *
 * Initializes clocks, DCCG, BIOS golden state, reference clocks, link
 * encoders, audio, backlight (ABM/DMCU) and clock gating.  Pipes are
 * powered down unless a seamless boot handoff is being attempted.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	/* Skip golden init if DMUB already did an optimized init. */
	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal freq. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Restore backlight state from the panel control registers. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl) {
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
			}
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight, user_level);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1682
1683 /* In headless boot cases, DIG may be turned
1684 * on which causes HW/SW discrepancies.
1685 * To avoid this, power down hardware on boot
1686 * if DIG is turned on
1687 */
dcn10_power_down_on_boot(struct dc * dc)1688 void dcn10_power_down_on_boot(struct dc *dc)
1689 {
1690 struct dc_link *edp_links[MAX_NUM_EDP];
1691 struct dc_link *edp_link = NULL;
1692 int edp_num;
1693 int i = 0;
1694
1695 dc_get_edp_links(dc, edp_links, &edp_num);
1696 if (edp_num)
1697 edp_link = edp_links[0];
1698
1699 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1700 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1701 dc->hwseq->funcs.edp_backlight_control &&
1702 dc->hwseq->funcs.power_down &&
1703 dc->hwss.edp_power_control) {
1704 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1705 dc->hwseq->funcs.power_down(dc);
1706 dc->hwss.edp_power_control(edp_link, false);
1707 } else {
1708 for (i = 0; i < dc->link_count; i++) {
1709 struct dc_link *link = dc->links[i];
1710
1711 if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1712 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1713 dc->hwseq->funcs.power_down) {
1714 dc->hwseq->funcs.power_down(dc);
1715 break;
1716 }
1717
1718 }
1719 }
1720
1721 /*
1722 * Call update_clocks with empty context
1723 * to send DISPLAY_OFF
1724 * Otherwise DISPLAY_OFF may not be asserted
1725 */
1726 if (dc->clk_mgr->funcs->set_low_power_state)
1727 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1728 }
1729
/*
 * dcn10_reset_hw_ctx_wrap - Reset the back end of every parent pipe that
 * loses its stream, or needs reprogramming, in the transition from the
 * current state to @context.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	/* Walk in reverse so child pipes are visited before parent pipe 0. */
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Only the parent pipe owns the back end. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1761
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1762 static bool patch_address_for_sbs_tb_stereo(
1763 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1764 {
1765 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1766 bool sec_split = pipe_ctx->top_pipe &&
1767 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1768 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1769 (pipe_ctx->stream->timing.timing_3d_format ==
1770 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1771 pipe_ctx->stream->timing.timing_3d_format ==
1772 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1773 *addr = plane_state->address.grph_stereo.left_addr;
1774 plane_state->address.grph_stereo.left_addr =
1775 plane_state->address.grph_stereo.right_addr;
1776 return true;
1777 } else {
1778 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1779 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1780 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1781 plane_state->address.grph_stereo.right_addr =
1782 plane_state->address.grph_stereo.left_addr;
1783 plane_state->address.grph_stereo.right_meta_addr =
1784 plane_state->address.grph_stereo.left_meta_addr;
1785 }
1786 }
1787 return false;
1788 }
1789
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1790 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1791 {
1792 bool addr_patched = false;
1793 PHYSICAL_ADDRESS_LOC addr;
1794 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1795
1796 if (plane_state == NULL)
1797 return;
1798
1799 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1800
1801 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1802 pipe_ctx->plane_res.hubp,
1803 &plane_state->address,
1804 plane_state->flip_immediate);
1805
1806 plane_state->status.requested_address = plane_state->address;
1807
1808 if (plane_state->flip_immediate)
1809 plane_state->status.current_address = plane_state->address;
1810
1811 if (addr_patched)
1812 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1813 }
1814
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1815 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1816 const struct dc_plane_state *plane_state)
1817 {
1818 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1819 const struct dc_transfer_func *tf = NULL;
1820 bool result = true;
1821
1822 if (dpp_base == NULL)
1823 return false;
1824
1825 tf = &plane_state->in_transfer_func;
1826
1827 if (!dpp_base->ctx->dc->debug.always_use_regamma
1828 && !plane_state->gamma_correction.is_identity
1829 && dce_use_lut(plane_state->format))
1830 dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
1831
1832 if (tf->type == TF_TYPE_PREDEFINED) {
1833 switch (tf->tf) {
1834 case TRANSFER_FUNCTION_SRGB:
1835 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1836 break;
1837 case TRANSFER_FUNCTION_BT709:
1838 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1839 break;
1840 case TRANSFER_FUNCTION_LINEAR:
1841 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1842 break;
1843 case TRANSFER_FUNCTION_PQ:
1844 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1845 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1846 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1847 result = true;
1848 break;
1849 default:
1850 result = false;
1851 break;
1852 }
1853 } else if (tf->type == TF_TYPE_BYPASS) {
1854 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1855 } else {
1856 cm_helper_translate_curve_to_degamma_hw_format(tf,
1857 &dpp_base->degamma_params);
1858 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1859 &dpp_base->degamma_params);
1860 result = true;
1861 }
1862
1863 return result;
1864 }
1865
1866 #define MAX_NUM_HW_POINTS 0x200
1867
/*
 * Log the transfer function points.
 *
 * DC_LOG_GAMMA logs the red channel of the programmed hw points only;
 * DC_LOG_ALL_GAMMA additionally logs the red channel of the points past
 * hw_points_num; DC_LOG_ALL_TF_CHANNELS logs green and blue as well.
 */
static void log_tf(struct dc_context *ctx,
		const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	int pt;

	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Points that were actually programmed to hardware. */
	for (pt = 0; pt < hw_points_num; pt++) {
		DC_LOG_GAMMA("R\t%d\t%llu", pt, tf->tf_pts.red[pt].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", pt, tf->tf_pts.green[pt].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", pt, tf->tf_pts.blue[pt].value);
	}

	/* Remaining points, only under the verbose log options. */
	for (pt = hw_points_num; pt < MAX_NUM_HW_POINTS; pt++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", pt, tf->tf_pts.red[pt].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", pt, tf->tf_pts.green[pt].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", pt, tf->tf_pts.blue[pt].value);
	}
}
1892
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1893 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1894 const struct dc_stream_state *stream)
1895 {
1896 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1897
1898 if (!stream)
1899 return false;
1900
1901 if (dpp == NULL)
1902 return false;
1903
1904 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1905
1906 if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
1907 stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
1908 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1909
1910 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1911 * update.
1912 */
1913 else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1914 &stream->out_transfer_func,
1915 &dpp->regamma_params, false)) {
1916 dpp->funcs->dpp_program_regamma_pwl(
1917 dpp,
1918 &dpp->regamma_params, OPP_REGAMMA_USER);
1919 } else
1920 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1921
1922 if (stream->ctx) {
1923 log_tf(stream->ctx,
1924 &stream->out_transfer_func,
1925 dpp->regamma_params.hw_points_num);
1926 }
1927
1928 return true;
1929 }
1930
/*
 * Lock or unlock register double-buffering for a pipe's timing generator.
 *
 * The TG master update lock covers every pipe on the TG, so only the top
 * pipe of each tree needs to take it; calls for secondary pipes are no-ops.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1955
1956 /**
1957 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1958 *
1959 * Software keepout workaround to prevent cursor update locking from stalling
1960 * out cursor updates indefinitely or from old values from being retained in
1961 * the case where the viewport changes in the same frame as the cursor.
1962 *
1963 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1964 * too close to VUPDATE, then stall out until VUPDATE finishes.
1965 *
1966 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1967 * to avoid the need for this workaround.
1968 *
1969 * @dc: Current DC state
1970 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1971 *
1972 * Return: void
1973 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are needed to locate VUPDATE relative to the raster. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
			&vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else {
		/* VPOS is past VUPDATE start: count lines through the wrap
		 * to the next frame's VUPDATE.
		 */
		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;

	/* Position is in the range of vupdate start and end*/
	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Too close to VUPDATE: busy-wait until it has passed. */
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
2021
/*
 * Lock or unlock cursor register updates for a pipe's MPCC tree.
 * The lock is per tree, so only the top pipe of each stream takes it.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		/* Route the lock through the DMUB HW lock manager. */
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, lock, &hw_locks,
				&inst_flags);
		return;
	}

	/* Otherwise take the MPC cursor lock directly. */
	dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
			pipe->stream_res.opp->inst, lock);
}
2047
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)2048 static bool wait_for_reset_trigger_to_occur(
2049 struct dc_context *dc_ctx,
2050 struct timing_generator *tg)
2051 {
2052 bool rc = false;
2053
2054 DC_LOGGER_INIT(dc_ctx->logger);
2055
2056 /* To avoid endless loop we wait at most
2057 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2058 const uint32_t frames_to_wait_on_triggered_reset = 10;
2059 int i;
2060
2061 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2062
2063 if (!tg->funcs->is_counter_moving(tg)) {
2064 DC_ERROR("TG counter is not moving!\n");
2065 break;
2066 }
2067
2068 if (tg->funcs->did_triggered_reset_occur(tg)) {
2069 rc = true;
2070 /* usually occurs at i=1 */
2071 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2072 i);
2073 break;
2074 }
2075
2076 /* Wait for one frame. */
2077 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2078 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2079 }
2080
2081 if (false == rc)
2082 DC_ERROR("GSL: Timeout on reset trigger!\n");
2083
2084 return rc;
2085 }
2086
/*
 * Reduce the fraction *numerator / *denominator in place by dividing out
 * all prime factors below 1000.
 *
 * When checkUint32Bounary is true, the reduction stops early as soon as
 * both values fit in 32 bits, and the return value reports whether that
 * bound was reached. When it is false, the fraction is reduced as far as
 * the prime table allows and the function returns true.
 *
 * Fix: the function computes and returns a boolean (`ret`) and its caller
 * compares the result against false, so the return type is bool, not the
 * previously declared uint64_t.
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
		uint64_t *denominator,
		bool checkUint32Bounary)
{
	int i;
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Early out once both values are 32-bit representable. */
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}
		/* Divide out this prime as many times as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2136
is_low_refresh_rate(struct pipe_ctx * pipe)2137 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2138 {
2139 uint32_t master_pipe_refresh_rate =
2140 pipe->stream->timing.pix_clk_100hz * 100 /
2141 pipe->stream->timing.h_total /
2142 pipe->stream->timing.v_total;
2143 return master_pipe_refresh_rate <= 30;
2144 }
2145
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2146 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2147 bool account_low_refresh_rate)
2148 {
2149 uint32_t clock_divider = 1;
2150 uint32_t numpipes = 1;
2151
2152 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2153 clock_divider *= 2;
2154
2155 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2156 clock_divider *= 2;
2157
2158 while (pipe->next_odm_pipe) {
2159 pipe = pipe->next_odm_pipe;
2160 numpipes++;
2161 }
2162 clock_divider *= numpipes;
2163
2164 return clock_divider;
2165 }
2166
/*
 * Retarget the DP DTOs of the non-embedded pipes in the group so their
 * pixel clocks are derived from the embedded panel timing packed into
 * dc->config.vblank_alignment_dto_params, allowing vblank alignment.
 *
 * Returns the index of the pipe to use as the alignment master: the
 * embedded pipe if present, otherwise the first successfully overridden
 * pipe, or -1 if allocation failed or nothing qualified.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk = 0;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* vblank_alignment_dto_params packs the embedded timing:
		 * bits [31:0] pixel clock in 100Hz units, bits [46:32]
		 * h_total, bits [62:48] v_total.
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: compute a DTO phase/modulo pair per pipe. */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel becomes the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*(uint64_t)100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: apply the overrides and read back the
		 * resulting pixel clock for each synchronizable pipe.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* No embedded pipe found: first overridden
				 * pipe becomes the master.
				 */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2256
/*
 * Synchronize the vblanks of a group of pipes by aligning their DP DTOs
 * to a master pipe and then aligning each follower TG's vblank to the
 * master's. Bails out early if any follower OTG is disabled.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* Stretch each follower's DPG vertical size (2*h + 1) while the
	 * alignment is in progress; restored at the end of this function.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear per-stream sync state before re-aligning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* master < 0 means no master pipe could be chosen: skip alignment. */
	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			/* Note: set even for pipes skipped by the if above. */
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG dimensions stretched above. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2321
/*
 * Synchronize the timing generators of a group of pipes by arming every
 * follower TG to reset off pipe 0's TG and waiting for the reset to
 * occur. SubVP phantom pipes are skipped throughout, as their timing is
 * managed separately. Bails out early if any follower OTG is disabled.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Stretch each follower's DPG vertical size (2*h + 1) while sync is
	 * in progress; restored at the end of this function.
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear per-stream vblank sync state. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm every follower TG to reset off pipe 0's TG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the reset triggers now that the reset has happened. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions stretched above. */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2404
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2405 void dcn10_enable_per_frame_crtc_position_reset(
2406 struct dc *dc,
2407 int group_size,
2408 struct pipe_ctx *grouped_pipes[])
2409 {
2410 struct dc_context *dc_ctx = dc->ctx;
2411 int i;
2412
2413 DC_LOGGER_INIT(dc_ctx->logger);
2414
2415 DC_SYNC_INFO("Setting up\n");
2416 for (i = 0; i < group_size; i++)
2417 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2418 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2419 grouped_pipes[i]->stream_res.tg,
2420 0,
2421 &grouped_pipes[i]->stream->triggered_crtc_reset);
2422
2423 DC_SYNC_INFO("Waiting for trigger\n");
2424
2425 for (i = 0; i < group_size; i++)
2426 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2427
2428 DC_SYNC_INFO("Multi-display sync is complete\n");
2429 }
2430
/*
 * Read the MC VM system aperture registers and convert them into the
 * aperture parameters hubp expects. Register access goes through the
 * REG_GET macros, which resolve via the hws parameter.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address (4 KiB pages). */
	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	/* NOTE(review): the low/high aperture registers appear to hold the
	 * address in 256 KiB units (hence << 18) — confirm against the
	 * register spec.
	 */
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2454
2455 /* Temporary read settings, future will get values from kmd directly */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	/* Read the MMHUB VM context 0 page table base/start/end and the
	 * protection fault default address, then rebase the page table
	 * base from UMA space into the DCN view (see the note below).
	 * Register access goes through REG_GET, which resolves via hws.
	 */
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* NOTE(review): FB base/offset registers appear to be in 16 MiB
	 * units (hence << 24) — confirm against the register spec.
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2499
2500
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2501 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2502 {
2503 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2504 struct vm_system_aperture_param apt = {0};
2505 struct vm_context0_param vm0 = {0};
2506
2507 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2508 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2509
2510 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2511 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2512 }
2513
/*
 * Power up and clock-gate-enable the resources backing a plane (HUBP and
 * its OPP clock), program VM settings when GPU VM is in use, and re-arm
 * the flip interrupt on the top pipe if the plane has it enabled.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Undo the DEGVIDCN10-253 workaround before powering up the plane. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Mirror the MMHUB VM aperture/context settings into this hubp. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Enable the flip interrupt on the top pipe when requested. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2552
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2553 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2554 {
2555 int i = 0;
2556 struct dpp_grph_csc_adjustment adjust;
2557 memset(&adjust, 0, sizeof(adjust));
2558 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2559
2560
2561 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2562 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2563 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2564 adjust.temperature_matrix[i] =
2565 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2566 } else if (pipe_ctx->plane_state &&
2567 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2568 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2569 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2570 adjust.temperature_matrix[i] =
2571 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2572 }
2573
2574 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2575 }
2576
2577
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2578 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2579 {
2580 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2581 if (pipe_ctx->top_pipe) {
2582 struct pipe_ctx *top = pipe_ctx->top_pipe;
2583
2584 while (top->top_pipe)
2585 top = top->top_pipe; // Traverse to top pipe_ctx
2586 if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
2587 // Global alpha used by top plane for PIP overlay
2588 // Pre-multiplied/per-pixel alpha used by MPO
2589 // Check top plane's global alpha to ensure layer_index > 0 not caused by PIP
2590 return true; // MPO in use and front plane not hidden
2591 }
2592 }
2593 return false;
2594 }
2595
/*
 * Program the rear plane's OCSC with the RGB bias terms (matrix[3/7/11])
 * zeroed, then restore the caller's matrix. This fixes MPO brightness:
 * blending would otherwise apply the bias from both front and rear planes.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	uint16_t saved_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
	/* All three bias slots carry the same offset value. */
	matrix[3] = saved_bias;
	matrix[7] = saved_bias;
	matrix[11] = saved_bias;
}
2609
/*
 * Program the output color space conversion for a pipe: either the
 * stream's explicit adjustment matrix or the default CSC for the given
 * colorspace when no adjustment is enabled.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value
			// NOTE(review): matrix is uint16_t; reading through
			// int16_t means bias values with the sign bit set
			// compare as negative and skip the fix path —
			// presumably intentional, confirm.

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		/* No per-stream adjustment: default CSC for the colorspace. */
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
2641
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2642 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2643 {
2644 struct dc_bias_and_scale bns_params = {0};
2645
2646 // program the input csc
2647 dpp->funcs->dpp_setup(dpp,
2648 plane_state->format,
2649 EXPANSION_MODE_ZERO,
2650 plane_state->input_csc_color_matrix,
2651 plane_state->color_space,
2652 NULL);
2653
2654 //set scale and bias registers
2655 build_prescale_params(&bns_params, plane_state);
2656 if (dpp->funcs->dpp_program_bias_and_scale)
2657 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2658 }
2659
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2660 void dcn10_update_visual_confirm_color(struct dc *dc,
2661 struct pipe_ctx *pipe_ctx,
2662 int mpcc_id)
2663 {
2664 struct mpc *mpc = dc->res_pool->mpc;
2665
2666 if (mpc->funcs->set_bg_color) {
2667 memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2668 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2669 }
2670 }
2671
/*
 * Configure MPC blending for a plane and attach its MPCC to the
 * stream's MPC tree.
 *
 * Builds the blend configuration (per-pixel vs global alpha, gain) from
 * the plane state, then either updates blending in place (non-full
 * update) or removes and re-inserts the MPCC into the tree (full
 * update).  Also refreshes the visual-confirm color for the MPCC.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when something blends underneath. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the routing so the HUBP knows which OPP/MPCC it feeds. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2749
update_scaler(struct pipe_ctx * pipe_ctx)2750 static void update_scaler(struct pipe_ctx *pipe_ctx)
2751 {
2752 bool per_pixel_alpha =
2753 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2754
2755 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2756 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2757 /* scaler configuration */
2758 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2759 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2760 }
2761
/*
 * Reprogram DCHUBP and DPP state for a pipe according to the plane's
 * update flags.
 *
 * On full update this covers DPPCLK selection/DTO, VTG selection, DLG/
 * TTU/RQ programming, gamut remap and output CSC.  Lesser updates only
 * touch the pieces whose update_flags bits are set (input CSC, MPCC
 * blending, scaler, viewport, surface config).  Finishes by flipping to
 * the new surface address and unblanking if the pipe tree is visible.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The surface size seen by the HUBP is the scaler viewport. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Re-apply cursor programming if a cursor surface is set. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_attribute(pipe_ctx);
		dc->hwss.set_cursor_position(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	dc->hwss.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2923
/*
 * Blank or unblank pixel data on a pipe's timing generator.
 *
 * Always (re)programs the OTG blank color first so blanking shows the
 * colorspace-correct black.  Unblanking also restores the stream's ABM
 * level; blanking disables ABM immediately and waits for VBLANK before
 * asserting blank so the transition is not visible mid-frame.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2967
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2968 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2969 {
2970 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2971 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2972 struct custom_float_format fmt;
2973
2974 fmt.exponenta_bits = 6;
2975 fmt.mantissa_bits = 12;
2976 fmt.sign = true;
2977
2978
2979 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2980 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2981
2982 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2983 pipe_ctx->plane_res.dpp, hw_mult);
2984 }
2985
/*
 * Program one pipe for the given state.
 *
 * For a top pipe this configures global sync, VTG params, the vupdate
 * interrupt and blanking first; then (on full update) enables the
 * plane, reprograms DCHUBP/DPP, applies the HDR multiplier and the
 * input/output transfer functions.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		/* Top pipe owns the OTG: blank only if nothing in the tree
		 * is visible. */
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width,
				pipe_ctx->pipe_dlg_param.pstate_keepout);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
3034
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)3035 void dcn10_wait_for_pending_cleared(struct dc *dc,
3036 struct dc_state *context)
3037 {
3038 struct pipe_ctx *pipe_ctx;
3039 struct timing_generator *tg;
3040 int i;
3041
3042 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3043 pipe_ctx = &context->res_ctx.pipe_ctx[i];
3044 tg = pipe_ctx->stream_res.tg;
3045
3046 /*
3047 * Only wait for top pipe's tg penindg bit
3048 * Also skip if pipe is disabled.
3049 */
3050 if (pipe_ctx->top_pipe ||
3051 !pipe_ctx->stream || !pipe_ctx->plane_state ||
3052 !tg->funcs->is_tg_enabled(tg))
3053 continue;
3054
3055 /*
3056 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3057 * For some reason waiting for OTG_UPDATE_PENDING cleared
3058 * seems to not trigger the update right away, and if we
3059 * lock again before VUPDATE then we don't get a separated
3060 * operation.
3061 */
3062 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3063 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3064 }
3065 }
3066
/*
 * Front-end steps that must run after pipe locks are released: apply
 * the OPTC underflow workaround for streams left with no planes, tear
 * down disabled planes, lower bandwidth if any pipe was disabled, and
 * apply the DEGVIDCN10-254 watermark workaround when flagged.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Only top-level (non-ODM-secondary) pipes with a stream. */
		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* One optimize_bandwidth call is enough; stop at the first
	 * disabled pipe. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3099
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3100 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3101 {
3102 uint8_t i;
3103
3104 for (i = 0; i < context->stream_count; i++) {
3105 if (context->streams[i]->timing.timing_3d_format
3106 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3107 /*
3108 * Disable stutter
3109 */
3110 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3111 break;
3112 }
3113 }
3114 }
3115
/*
 * Raise clocks and program watermarks ahead of mode/surface programming
 * (the "safe first" half of the bandwidth update; dcn10_optimize_bandwidth
 * is the lowering half).  Also applies the HW frame-pack stutter
 * workaround and, when pplib WM reporting is overridden, notifies pplib
 * of the watermark ranges.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: phyclk can be dropped entirely. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3153
/*
 * Lower clocks and re-program watermarks after mode/surface programming
 * completes (counterpart to dcn10_prepare_bandwidth, which raises them
 * beforehand).  Clock lowering is safe only once the new configuration
 * is live, hence the safe_to_lower=true update_clocks call.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: phyclk can be dropped entirely. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3192
/*
 * Program dynamic refresh rate (DRR / vtotal min-max) on each pipe's
 * timing generator, and arm the manual DRR trigger when an adjustment
 * range is active.
 */
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, you need
	 * some GSL stuff. Static screen triggers may be programmed differently
	 * as well.
	 */
	for (i = 0; i < num_pipes; i++) {
		/* dc_state_destruct() might null the stream resources, so fetch tg
		 * here first to avoid a race condition. The lifetime of the pointee
		 * itself (the timing_generator object) is not a problem here.
		 */
		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;

		if ((tg != NULL) && tg->funcs) {
			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (tg->funcs->set_static_screen_control)
					tg->funcs->set_static_screen_control(
						tg, event_triggers, num_frames);
		}
	}
}
3227
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3228 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3229 int num_pipes,
3230 struct crtc_position *position)
3231 {
3232 int i = 0;
3233
3234 /* TODO: handle pipes > 1
3235 */
3236 for (i = 0; i < num_pipes; i++)
3237 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3238 }
3239
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3240 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3241 int num_pipes, const struct dc_static_screen_params *params)
3242 {
3243 unsigned int i;
3244 unsigned int triggers = 0;
3245
3246 if (params->triggers.surface_update)
3247 triggers |= 0x80;
3248 if (params->triggers.cursor_update)
3249 triggers |= 0x2;
3250 if (params->triggers.force_trigger)
3251 triggers |= 0x1;
3252
3253 for (i = 0; i < num_pipes; i++)
3254 pipe_ctx[i]->stream_res.tg->funcs->
3255 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3256 triggers, params->num_frames);
3257 }
3258
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3259 static void dcn10_config_stereo_parameters(
3260 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3261 {
3262 enum view_3d_format view_format = stream->view_format;
3263 enum dc_timing_3d_format timing_3d_format =\
3264 stream->timing.timing_3d_format;
3265 bool non_stereo_timing = false;
3266
3267 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3268 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3269 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3270 non_stereo_timing = true;
3271
3272 if (non_stereo_timing == false &&
3273 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3274
3275 flags->PROGRAM_STEREO = 1;
3276 flags->PROGRAM_POLARITY = 1;
3277 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3278 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3279 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3280 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3281
3282 if (stream->link && stream->link->ddc) {
3283 enum display_dongle_type dongle = \
3284 stream->link->ddc->dongle_type;
3285
3286 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3287 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3288 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3289 flags->DISABLE_STEREO_DP_SYNC = 1;
3290 }
3291 }
3292 flags->RIGHT_EYE_POLARITY =\
3293 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3294 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3295 flags->FRAME_PACKED = 1;
3296 }
3297
3298 return;
3299 }
3300
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3301 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3302 {
3303 struct crtc_stereo_flags flags = { 0 };
3304 struct dc_stream_state *stream = pipe_ctx->stream;
3305
3306 dcn10_config_stereo_parameters(stream, &flags);
3307
3308 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3309 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3310 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3311 } else {
3312 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3313 }
3314
3315 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3316 pipe_ctx->stream_res.opp,
3317 flags.PROGRAM_STEREO == 1,
3318 &stream->timing);
3319
3320 pipe_ctx->stream_res.tg->funcs->program_stereo(
3321 pipe_ctx->stream_res.tg,
3322 &stream->timing,
3323 &flags);
3324
3325 return;
3326 }
3327
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3328 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3329 {
3330 int i;
3331
3332 for (i = 0; i < res_pool->pipe_count; i++) {
3333 if (res_pool->hubps[i]->inst == mpcc_inst)
3334 return res_pool->hubps[i];
3335 }
3336 ASSERT(false);
3337 return NULL;
3338 }
3339
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3340 void dcn10_wait_for_mpcc_disconnect(
3341 struct dc *dc,
3342 struct resource_pool *res_pool,
3343 struct pipe_ctx *pipe_ctx)
3344 {
3345 struct dce_hwseq *hws = dc->hwseq;
3346 int mpcc_inst;
3347
3348 if (dc->debug.sanity_checks) {
3349 hws->funcs.verify_allow_pstate_change_high(dc);
3350 }
3351
3352 if (!pipe_ctx->stream_res.opp)
3353 return;
3354
3355 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3356 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3357 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3358
3359 if (pipe_ctx->stream_res.tg &&
3360 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3361 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3362 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3363 hubp->funcs->set_blank(hubp, true);
3364 }
3365 }
3366
3367 if (dc->debug.sanity_checks) {
3368 hws->funcs.verify_allow_pstate_change_high(dc);
3369 }
3370
3371 }
3372
/*
 * No-op display power-gating hook.  DCN does not gate pipes through
 * this path, so this stub simply reports success; all parameters are
 * intentionally unused.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3381
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3382 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3383 {
3384 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3385 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3386 bool flip_pending;
3387 struct dc *dc = pipe_ctx->stream->ctx->dc;
3388
3389 if (plane_state == NULL)
3390 return;
3391
3392 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3393 pipe_ctx->plane_res.hubp);
3394
3395 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3396
3397 if (!flip_pending)
3398 plane_state->status.current_address = plane_state->status.requested_address;
3399
3400 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3401 tg->funcs->is_stereo_left_eye) {
3402 plane_state->status.is_right_eye =
3403 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3404 }
3405
3406 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3407 struct dce_hwseq *hwseq = dc->hwseq;
3408 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3409 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3410
3411 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3412 struct hubbub *hubbub = dc->res_pool->hubbub;
3413
3414 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3415 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3416 }
3417 }
3418 }
3419
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3420 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3421 {
3422 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3423
3424 /* In DCN, this programming sequence is owned by the hubbub */
3425 hubbub->funcs->update_dchub(hubbub, dh_data);
3426 }
3427
dcn10_set_cursor_position(struct pipe_ctx * pipe_ctx)3428 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3429 {
3430 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3431 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3432 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3433 struct dc_cursor_mi_param param = {
3434 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3435 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3436 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3437 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3438 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3439 .rotation = pipe_ctx->plane_state->rotation,
3440 .mirror = pipe_ctx->plane_state->horizontal_mirror,
3441 .stream = pipe_ctx->stream,
3442 };
3443 bool pipe_split_on = false;
3444 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3445 (pipe_ctx->prev_odm_pipe != NULL);
3446
3447 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3448 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3449 int x_pos = pos_cpy.x;
3450 int y_pos = pos_cpy.y;
3451
3452 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3453 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3454 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3455 pipe_split_on = true;
3456 }
3457 }
3458
3459 /**
3460 * DC cursor is stream space, HW cursor is plane space and drawn
3461 * as part of the framebuffer.
3462 *
3463 * Cursor position can't be negative, but hotspot can be used to
3464 * shift cursor out of the plane bounds. Hotspot must be smaller
3465 * than the cursor size.
3466 */
3467
3468 /**
3469 * Translate cursor from stream space to plane space.
3470 *
3471 * If the cursor is scaled then we need to scale the position
3472 * to be in the approximately correct place. We can't do anything
3473 * about the actual size being incorrect, that's a limitation of
3474 * the hardware.
3475 */
3476 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3477 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3478 pipe_ctx->plane_state->dst_rect.width;
3479 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3480 pipe_ctx->plane_state->dst_rect.height;
3481 } else {
3482 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3483 pipe_ctx->plane_state->dst_rect.width;
3484 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3485 pipe_ctx->plane_state->dst_rect.height;
3486 }
3487
3488 /**
3489 * If the cursor's source viewport is clipped then we need to
3490 * translate the cursor to appear in the correct position on
3491 * the screen.
3492 *
3493 * This translation isn't affected by scaling so it needs to be
3494 * done *after* we adjust the position for the scale factor.
3495 *
3496 * This is only done by opt-in for now since there are still
3497 * some usecases like tiled display that might enable the
3498 * cursor on both streams while expecting dc to clip it.
3499 */
3500 if (pos_cpy.translate_by_source) {
3501 x_pos += pipe_ctx->plane_state->src_rect.x;
3502 y_pos += pipe_ctx->plane_state->src_rect.y;
3503 }
3504
3505 /**
3506 * If the position is negative then we need to add to the hotspot
3507 * to shift the cursor outside the plane.
3508 */
3509
3510 if (x_pos < 0) {
3511 pos_cpy.x_hotspot -= x_pos;
3512 x_pos = 0;
3513 }
3514
3515 if (y_pos < 0) {
3516 pos_cpy.y_hotspot -= y_pos;
3517 y_pos = 0;
3518 }
3519
3520 pos_cpy.x = (uint32_t)x_pos;
3521 pos_cpy.y = (uint32_t)y_pos;
3522
3523 if (pipe_ctx->plane_state->address.type
3524 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3525 pos_cpy.enable = false;
3526
3527 if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
3528 pos_cpy.enable = false;
3529
3530
3531 if (param.rotation == ROTATION_ANGLE_0) {
3532 int viewport_width =
3533 pipe_ctx->plane_res.scl_data.viewport.width;
3534 int viewport_x =
3535 pipe_ctx->plane_res.scl_data.viewport.x;
3536
3537 if (param.mirror) {
3538 if (pipe_split_on || odm_combine_on) {
3539 if (pos_cpy.x >= viewport_width + viewport_x) {
3540 pos_cpy.x = 2 * viewport_width
3541 - pos_cpy.x + 2 * viewport_x;
3542 } else {
3543 uint32_t temp_x = pos_cpy.x;
3544
3545 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3546 if (temp_x >= viewport_x +
3547 (int)hubp->curs_attr.width || pos_cpy.x
3548 <= (int)hubp->curs_attr.width +
3549 pipe_ctx->plane_state->src_rect.x) {
3550 pos_cpy.x = 2 * viewport_width - temp_x;
3551 }
3552 }
3553 } else {
3554 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3555 }
3556 }
3557 }
3558 // Swap axis and mirror horizontally
3559 else if (param.rotation == ROTATION_ANGLE_90) {
3560 uint32_t temp_x = pos_cpy.x;
3561
3562 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3563 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3564 pos_cpy.y = temp_x;
3565 }
3566 // Swap axis and mirror vertically
3567 else if (param.rotation == ROTATION_ANGLE_270) {
3568 uint32_t temp_y = pos_cpy.y;
3569 int viewport_height =
3570 pipe_ctx->plane_res.scl_data.viewport.height;
3571 int viewport_y =
3572 pipe_ctx->plane_res.scl_data.viewport.y;
3573
3574 /**
3575 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3576 * For pipe split cases:
3577 * - apply offset of viewport.y to normalize pos_cpy.x
3578 * - calculate the pos_cpy.y as before
3579 * - shift pos_cpy.y back by same offset to get final value
3580 * - since we iterate through both pipes, use the lower
3581 * viewport.y for offset
3582 * For non pipe split cases, use the same calculation for
3583 * pos_cpy.y as the 180 degree rotation case below,
3584 * but use pos_cpy.x as our input because we are rotating
3585 * 270 degrees
3586 */
3587 if (pipe_split_on || odm_combine_on) {
3588 int pos_cpy_x_offset;
3589 int other_pipe_viewport_y;
3590
3591 if (pipe_split_on) {
3592 if (pipe_ctx->bottom_pipe) {
3593 other_pipe_viewport_y =
3594 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3595 } else {
3596 other_pipe_viewport_y =
3597 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3598 }
3599 } else {
3600 if (pipe_ctx->next_odm_pipe) {
3601 other_pipe_viewport_y =
3602 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3603 } else {
3604 other_pipe_viewport_y =
3605 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3606 }
3607 }
3608 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3609 other_pipe_viewport_y : viewport_y;
3610 pos_cpy.x -= pos_cpy_x_offset;
3611 if (pos_cpy.x > viewport_height) {
3612 pos_cpy.x = pos_cpy.x - viewport_height;
3613 pos_cpy.y = viewport_height - pos_cpy.x;
3614 } else {
3615 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3616 }
3617 pos_cpy.y += pos_cpy_x_offset;
3618 } else {
3619 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3620 }
3621 pos_cpy.x = temp_y;
3622 }
3623 // Mirror horizontally and vertically
3624 else if (param.rotation == ROTATION_ANGLE_180) {
3625 int viewport_width =
3626 pipe_ctx->plane_res.scl_data.viewport.width;
3627 int viewport_x =
3628 pipe_ctx->plane_res.scl_data.viewport.x;
3629
3630 if (!param.mirror) {
3631 if (pipe_split_on || odm_combine_on) {
3632 if (pos_cpy.x >= viewport_width + viewport_x) {
3633 pos_cpy.x = 2 * viewport_width
3634 - pos_cpy.x + 2 * viewport_x;
3635 } else {
3636 uint32_t temp_x = pos_cpy.x;
3637
3638 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3639 if (temp_x >= viewport_x +
3640 (int)hubp->curs_attr.width || pos_cpy.x
3641 <= (int)hubp->curs_attr.width +
3642 pipe_ctx->plane_state->src_rect.x) {
3643 pos_cpy.x = temp_x + viewport_width;
3644 }
3645 }
3646 } else {
3647 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3648 }
3649 }
3650
3651 /**
3652 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3653 * Calculation:
3654 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3655 * pos_cpy.y_new = viewport.y + delta_from_bottom
3656 * Simplify it as:
3657 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3658 */
3659 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3660 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3661 }
3662
3663 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3664 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3665 }
3666
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3667 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3668 {
3669 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3670
3671 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3672 pipe_ctx->plane_res.hubp, attributes);
3673 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3674 pipe_ctx->plane_res.dpp, attributes);
3675 }
3676
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3677 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3678 {
3679 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3680 struct fixed31_32 multiplier;
3681 struct dpp_cursor_attributes opt_attr = { 0 };
3682 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3683 struct custom_float_format fmt;
3684
3685 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3686 return;
3687
3688 fmt.exponenta_bits = 5;
3689 fmt.mantissa_bits = 10;
3690 fmt.sign = true;
3691
3692 if (sdr_white_level > 80) {
3693 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3694 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3695 }
3696
3697 opt_attr.scale = hw_scale;
3698 opt_attr.bias = 0;
3699
3700 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3701 pipe_ctx->plane_res.dpp, &opt_attr);
3702 }
3703
3704 /*
3705 * apply_front_porch_workaround TODO FPGA still need?
3706 *
3707 * This is a workaround for a bug that has existed since R5xx and has not been
3708 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3709 */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3710 static void apply_front_porch_workaround(
3711 struct dc_crtc_timing *timing)
3712 {
3713 if (timing->flags.INTERLACE == 1) {
3714 if (timing->v_front_porch < 2)
3715 timing->v_front_porch = 2;
3716 } else {
3717 if (timing->v_front_porch < 1)
3718 timing->v_front_porch = 1;
3719 }
3720 }
3721
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3722 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3723 {
3724 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3725 struct dc_crtc_timing patched_crtc_timing;
3726 int vesa_sync_start;
3727 int asic_blank_end;
3728 int interlace_factor;
3729
3730 patched_crtc_timing = *dc_crtc_timing;
3731 apply_front_porch_workaround(&patched_crtc_timing);
3732
3733 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3734
3735 vesa_sync_start = patched_crtc_timing.v_addressable +
3736 patched_crtc_timing.v_border_bottom +
3737 patched_crtc_timing.v_front_porch;
3738
3739 asic_blank_end = (patched_crtc_timing.v_total -
3740 vesa_sync_start -
3741 patched_crtc_timing.v_border_top)
3742 * interlace_factor;
3743
3744 return asic_blank_end -
3745 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3746 }
3747
/*
 * Compute the OTG line window around the VUPDATE event.
 * Writes a start line wrapped into the frame and an end line two lines
 * later (modulo v_total).
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	/* VUPDATE offset relative to VSYNC, in lines; may be negative. */
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/*
	 * Wrap the offset back into the frame. The negative branch keeps a
	 * historical "- 1" bias (lands one line earlier than a plain modulo
	 * would) — NOTE(review): presumably intentional; confirm before
	 * simplifying to a modulo.
	 */
	if (vupdate_pos >= 0)
		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
	else
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
	/* Two-line interrupt window, wrapped at the end of the frame. */
	*end_line = (*start_line + 2) % timing->v_total;
}
3763
/*
 * Compute the OTG line window for the stream's periodic interrupt,
 * relative to either VUPDATE or VSYNC depending on the configured
 * reference point.
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	/* Requested line offset; sign selects before/after the ref point. */
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
		/* Pull the offset one line toward VUPDATE before adding the
		 * VUPDATE-from-VSYNC distance.
		 */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/*
		 * Wrap the (possibly negative) position into the frame; the
		 * negative branch carries the same "- 1" bias as
		 * dcn10_calc_vupdate_position().
		 */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		/* Two-line interrupt window, wrapped at end of frame. */
		*end_line = (*start_line + 2) % timing->v_total;
	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
		// vsync is line 0 so start_line is just the requested line offset
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
	} else
		ASSERT(0);
}
3792
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3793 void dcn10_setup_periodic_interrupt(
3794 struct dc *dc,
3795 struct pipe_ctx *pipe_ctx)
3796 {
3797 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3798 uint32_t start_line = 0;
3799 uint32_t end_line = 0;
3800
3801 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3802
3803 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3804 }
3805
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3806 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3807 {
3808 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3809 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3810
3811 if (start_line < 0) {
3812 ASSERT(0);
3813 start_line = 0;
3814 }
3815
3816 if (tg->funcs->setup_vertical_interrupt2)
3817 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3818 }
3819
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3820 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3821 struct dc_link_settings *link_settings)
3822 {
3823 struct encoder_unblank_param params = {0};
3824 struct dc_stream_state *stream = pipe_ctx->stream;
3825 struct dc_link *link = stream->link;
3826 struct dce_hwseq *hws = link->dc->hwseq;
3827
3828 /* only 3 items below are used by unblank */
3829 params.timing = pipe_ctx->stream->timing;
3830
3831 params.link_settings.link_rate = link_settings->link_rate;
3832
3833 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3834 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3835 params.timing.pix_clk_100hz /= 2;
3836 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3837 }
3838
3839 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3840 hws->funcs.edp_backlight_control(link, true);
3841 }
3842 }
3843
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3844 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3845 const uint8_t *custom_sdp_message,
3846 unsigned int sdp_message_size)
3847 {
3848 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3849 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3850 pipe_ctx->stream_res.stream_enc,
3851 custom_sdp_message,
3852 sdp_message_size);
3853 }
3854 }
dcn10_set_clock(struct dc * dc,enum dc_clock_type clock_type,uint32_t clk_khz,uint32_t stepping)3855 enum dc_status dcn10_set_clock(struct dc *dc,
3856 enum dc_clock_type clock_type,
3857 uint32_t clk_khz,
3858 uint32_t stepping)
3859 {
3860 struct dc_state *context = dc->current_state;
3861 struct dc_clock_config clock_cfg = {0};
3862 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3863
3864 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3865 return DC_FAIL_UNSUPPORTED_1;
3866
3867 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3868 context, clock_type, &clock_cfg);
3869
3870 if (clk_khz > clock_cfg.max_clock_khz)
3871 return DC_FAIL_CLK_EXCEED_MAX;
3872
3873 if (clk_khz < clock_cfg.min_clock_khz)
3874 return DC_FAIL_CLK_BELOW_MIN;
3875
3876 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3877 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3878
3879 /*update internal request clock for update clock use*/
3880 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3881 current_clocks->dispclk_khz = clk_khz;
3882 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3883 current_clocks->dppclk_khz = clk_khz;
3884 else
3885 return DC_ERROR_UNEXPECTED;
3886
3887 if (dc->clk_mgr->funcs->update_clocks)
3888 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3889 context, true);
3890 return DC_OK;
3891
3892 }
3893
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3894 void dcn10_get_clock(struct dc *dc,
3895 enum dc_clock_type clock_type,
3896 struct dc_clock_config *clock_cfg)
3897 {
3898 struct dc_state *context = dc->current_state;
3899
3900 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3901 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3902
3903 }
3904
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3905 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3906 {
3907 struct resource_pool *pool = dc->res_pool;
3908 int i;
3909
3910 for (i = 0; i < pool->pipe_count; i++) {
3911 struct hubp *hubp = pool->hubps[i];
3912 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3913
3914 hubp->funcs->hubp_read_state(hubp);
3915
3916 if (!s->blank_en)
3917 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3918 }
3919 }
3920
3921 /**
3922 * dcn10_reset_surface_dcc_and_tiling - Set DCC and tiling in DCN to their disable mode.
3923 *
3924 * @pipe_ctx: Pointer to the pipe context structure.
3925 * @plane_state: Surface state
3926 * @clear_tiling: If true set tiling to Linear, otherwise does not change tiling
3927 *
3928 * This function is responsible for call the HUBP block to disable DCC and set
3929 * tiling to the linear mode.
3930 */
dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx * pipe_ctx,struct dc_plane_state * plane_state,bool clear_tiling)3931 void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
3932 struct dc_plane_state *plane_state,
3933 bool clear_tiling)
3934 {
3935 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3936
3937 if (!hubp)
3938 return;
3939
3940 /* if framebuffer is tiled, disable tiling */
3941 if (clear_tiling && hubp->funcs->hubp_clear_tiling)
3942 hubp->funcs->hubp_clear_tiling(hubp);
3943
3944 /* force page flip to see the new content of the framebuffer */
3945 hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
3946 &plane_state->address,
3947 true);
3948 }
3949