1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 #include "dc_state_priv.h"
60
61 #define DC_LOGGER \
62 dc_logger
63 #define DC_LOGGER_INIT(logger) \
64 struct dal_logger *dc_logger = logger
65
66 #define CTX \
67 hws->ctx
68 #define REG(reg)\
69 hws->regs->reg
70
71 #undef FN
72 #define FN(reg_name, field_name) \
73 hws->shifts->field_name, hws->masks->field_name
74
75 /*print is 17 wide, first two characters are spaces*/
76 #define DTN_INFO_MICRO_SEC(ref_cycle) \
77 print_microsec(dc_ctx, log_ctx, ref_cycle)
78
79 #define GAMMA_HW_POINTS_NUM 256
80
81 #define PGFSM_POWER_ON 0
82 #define PGFSM_POWER_OFF 2
83
/*
 * Print a DCHUB refclock cycle count as microseconds with three decimal
 * places into the DTN log buffer.
 */
static void print_microsec(struct dc_context *dc_ctx,
		struct dc_log_buffer_ctx *log_ctx,
		uint32_t ref_cycle)
{
	static const unsigned int frac = 1000;
	const uint32_t ref_clk_mhz =
		dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t us_fixed = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO(" %11d.%03d",
			us_fixed / frac,
			us_fixed % frac);
}
96
/*
 * dcn10_lock_all_pipes - Lock or unlock the OTG of every active top pipe.
 *
 * @dc: dc reference.
 * @context: state whose pipes are walked.
 * @lock: true to lock, false to unlock.
 *
 * Only the top pipe of each tree is (un)locked to avoid redundant
 * operations; disabled pipes, pipes with no plane in either the new or
 * current state, pipes whose TG is off, and subvp phantom pipes are
 * skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
			continue;

		/* The previous if/else both passed a constant equal to
		 * 'lock'; forward it directly. */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
128
/*
 * Log the MPC and DPP CRC result registers into the DTN log buffer.
 * A register address of 0 in the reg map means the register does not
 * exist on this ASIC, so each dump is guarded by REG().
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
				REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
				REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
142
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)143 static void dcn10_log_hubbub_state(struct dc *dc,
144 struct dc_log_buffer_ctx *log_ctx)
145 {
146 struct dc_context *dc_ctx = dc->ctx;
147 struct dcn_hubbub_wm wm;
148 int i;
149
150 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
151 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
152
153 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
154 " sr_enter sr_exit dram_clk_change\n");
155
156 for (i = 0; i < 4; i++) {
157 struct dcn_hubbub_wm_set *s;
158
159 s = &wm.sets[i];
160 DTN_INFO("WM_Set[%d]:", s->wm_set);
161 DTN_INFO_MICRO_SEC(s->data_urgent);
162 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
163 DTN_INFO_MICRO_SEC(s->sr_enter);
164 DTN_INFO_MICRO_SEC(s->sr_exit);
165 DTN_INFO_MICRO_SEC(s->dram_clk_change);
166 DTN_INFO("\n");
167 }
168
169 DTN_INFO("\n");
170 }
171
/*
 * Dump per-HUBP hardware state into the DTN log buffer: surface
 * configuration, then the RQ, DLG and TTU register groups. Pipes whose
 * HUBP is blanked are skipped in every section.
 *
 * log_ctx is void * to match the caller's generic log hook; it is only
 * consumed through the DTN_INFO macros.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Latch current register values into the cached state. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			/* These three are refclock cycle counts; print as us. */
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Request queue (RQ) registers, luma (L:) then chroma (C:). */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Display latency guarantee (DLG) registers. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* Time-to-underflow (TTU) QoS registers. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
285
/**
 * dcn10_log_hw_state - Dump the full DCN hardware state to the DTN log.
 *
 * @dc: dc reference.
 * @log_ctx: destination log buffer context, consumed via DTN_INFO macros.
 *
 * Prints one table per hardware block: HUBBUB watermarks, HUBP state,
 * DPP gamma/gamut state, MPCC tree, OTG timing, DSC, stream encoders,
 * link encoders, calculated clocks, CRC registers and, when the pool has
 * them, DP HPO stream/link encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP: input/degamma/regamma LUT modes and gamut remap matrix. */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
			" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
			"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
				"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* MPCC blending tree. */
	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means this MPCC is not connected to an OPP. */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	/* OTG timing state. */
	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* Stream encoder state (only for encoders exposing enc_read_state). */
	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Link encoder FEC / link-training state. */
	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		/* DP HPO (128b/132b) stream encoder section, when present. */
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
539
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)540 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
541 {
542 struct hubp *hubp = pipe_ctx->plane_res.hubp;
543 struct timing_generator *tg = pipe_ctx->stream_res.tg;
544
545 if (tg->funcs->is_optc_underflow_occurred(tg)) {
546 tg->funcs->clear_optc_underflow(tg);
547 return true;
548 }
549
550 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
551 hubp->funcs->hubp_clear_underflow(hubp);
552 return true;
553 }
554 return false;
555 }
556
/*
 * dcn10_enable_power_gating_plane - Allow or forbid HUBP/DPP power gating.
 *
 * @hws: dce_hwseq reference.
 * @enable: true to allow the PGFSM to gate the domains, false to force
 *          every domain on (gating disabled).
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	/* Forcing a domain on is the opposite of allowing it to gate. */
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
578
/*
 * dcn10_disable_vga - Take the display controllers out of legacy VGA mode.
 *
 * @hws: dce_hwseq reference.
 *
 * No-op when none of the four D*VGA controllers is in VGA mode.
 * Otherwise all four are disabled and the VGA_TEST render sequence is
 * started so DCHUBP timing gets updated correctly (see HW note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do if no controller is in VGA mode. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
611
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP on (ungate), false to power gate it.
 *            (power_gate is the inverse of power_on below.)
 *
 * Enable or disable power gate in the specific DPP instance, then wait
 * for the PGFSM to report the requested power state. No-op when DPP
 * power gating is disabled by debug option or the domain register does
 * not exist on this ASIC.
 */
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
672
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP on (ungate), false to power gate it.
 *            (power_gate is the inverse of power_on below.)
 *
 * Enable or disable power gate in the specific HUBP instance, then wait
 * for the PGFSM to report the requested power state. No-op when HUBP
 * power gating is disabled by debug option or the domain register does
 * not exist on this ASIC.
 */
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
733
/*
 * Ungate the DPP and HUBP power domains for the given plane/pipe id and
 * enable its DPP root clock, so the front end can be programmed.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	/* Enable the DPP root clock first, when this ASIC gates it. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the IP request window while changing power gate state. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
759
/*
 * Undo the DEGVIDCN10_253 workaround: blank HUBP0 and power gate it back
 * off. No-op when the workaround was never applied (see
 * apply_DEGVIDCN10_253_wa()).
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* Open the IP request window while changing power gate state. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
779
/*
 * DEGVIDCN10_253 workaround: once every HUBP is power gated, power HUBP0
 * back on (kept blanked via set_hubp_blank_en) to enable stutter.
 * Skipped when stutter is disabled by debug option or the workaround is
 * not flagged for this ASIC. Reverted by undo_DEGVIDCN10_253_wa().
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only applicable when every HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	/* Open the IP request window while changing power gate state. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
809
/*
 * dcn10_bios_golden_init - Run the VBIOS golden init sequence.
 *
 * @dc: dc reference.
 *
 * Lets the VBIOS command table initialize DCN globally and disable each
 * per-pipe power gating domain. If the ASIC provides an s0i3 golden-init
 * workaround hook and it reports it handled the init, nothing else runs.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Snapshot the self-refresh force-enable state before the command
	 * table runs, so it can be restored below (see WA note). */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
			dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
849
/*
 * Workaround for a false OPTC underflow indication: wait for all MPCCs
 * feeding this stream to disconnect, re-enable blank data double
 * buffering, then clear the underflow bit only when it was raised by
 * this sequence (a real, pre-existing underflow is left sticky).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether an underflow was already pending on entry. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Clear only an underflow introduced by this sequence. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
878
calculate_vready_offset_for_group(struct pipe_ctx * pipe)879 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
880 {
881 struct pipe_ctx *other_pipe;
882 int vready_offset = pipe->pipe_dlg_param.vready_offset;
883
884 /* Always use the largest vready_offset of all connected pipes */
885 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
886 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
887 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
888 }
889 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
890 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
891 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
892 }
893 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
894 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
895 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
896 }
897 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
898 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
899 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
900 }
901
902 return vready_offset;
903 }
904
/*
 * dcn10_enable_stream_timing - program and start OTG/OPTC timing for a stream.
 *
 * Only the parent pipe (top_pipe == NULL) programs the back end; child pipes
 * share it and return early.  Returns DC_OK on success, DC_ERROR_UNEXPECTED
 * if pixel-clock programming or CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Pixel clock must be running before the timing can be programmed. */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* For HDMI, record that the OTG now holds a symclk reference and
	 * advance the tracked symclk state accordingly.
	 */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* Program timing with the group-wide worst-case vready offset. */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&stream->bit_depth_params,
			&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC; apply the false-underflow
	 * workaround once blanking completes.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1006
/*
 * Tear down the back end owned by @pipe_ctx: disable the stream/link (or at
 * least its audio), release the audio endpoint, and — for the parent pipe
 * only — shut down the shared OTG.  The stream pointer is cleared only when
 * the pipe belongs to dc->current_state.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		/* No stream encoder: back end was never brought up for this pipe. */
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* Drop the OTG's HDMI symclk reference taken at enable time. */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear pipe_ctx->stream when this pipe is part of the current
	 * state; otherwise leave the context untouched.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1076
dcn10_hw_wa_force_recovery(struct dc * dc)1077 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1078 {
1079 struct hubp *hubp ;
1080 unsigned int i;
1081 bool need_recover = true;
1082
1083 if (!dc->debug.recovery_enabled)
1084 return false;
1085
1086 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1087 struct pipe_ctx *pipe_ctx =
1088 &dc->current_state->res_ctx.pipe_ctx[i];
1089 if (pipe_ctx != NULL) {
1090 hubp = pipe_ctx->plane_res.hubp;
1091 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1092 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1093 /* one pipe underflow, we will reset all the pipes*/
1094 need_recover = true;
1095 }
1096 }
1097 }
1098 }
1099 if (!need_recover)
1100 return false;
1101 /*
1102 DCHUBP_CNTL:HUBP_BLANK_EN=1
1103 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1104 DCHUBP_CNTL:HUBP_DISABLE=1
1105 DCHUBP_CNTL:HUBP_DISABLE=0
1106 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1107 DCSURF_PRIMARY_SURFACE_ADDRESS
1108 DCHUBP_CNTL:HUBP_BLANK_EN=0
1109 */
1110
1111 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1112 struct pipe_ctx *pipe_ctx =
1113 &dc->current_state->res_ctx.pipe_ctx[i];
1114 if (pipe_ctx != NULL) {
1115 hubp = pipe_ctx->plane_res.hubp;
1116 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1117 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1118 hubp->funcs->set_hubp_blank_en(hubp, true);
1119 }
1120 }
1121 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1122 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1123
1124 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1125 struct pipe_ctx *pipe_ctx =
1126 &dc->current_state->res_ctx.pipe_ctx[i];
1127 if (pipe_ctx != NULL) {
1128 hubp = pipe_ctx->plane_res.hubp;
1129 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1130 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1131 hubp->funcs->hubp_disable_control(hubp, true);
1132 }
1133 }
1134 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1135 struct pipe_ctx *pipe_ctx =
1136 &dc->current_state->res_ctx.pipe_ctx[i];
1137 if (pipe_ctx != NULL) {
1138 hubp = pipe_ctx->plane_res.hubp;
1139 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1140 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1141 hubp->funcs->hubp_disable_control(hubp, true);
1142 }
1143 }
1144 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1145 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1146 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1147 struct pipe_ctx *pipe_ctx =
1148 &dc->current_state->res_ctx.pipe_ctx[i];
1149 if (pipe_ctx != NULL) {
1150 hubp = pipe_ctx->plane_res.hubp;
1151 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1152 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1153 hubp->funcs->set_hubp_blank_en(hubp, true);
1154 }
1155 }
1156 return true;
1157
1158 }
1159
dcn10_verify_allow_pstate_change_high(struct dc * dc)1160 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1161 {
1162 struct hubbub *hubbub = dc->res_pool->hubbub;
1163 static bool should_log_hw_state; /* prevent hw state log by default */
1164
1165 if (!hubbub->funcs->verify_allow_pstate_change_high)
1166 return;
1167
1168 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1169 int i = 0;
1170
1171 if (should_log_hw_state)
1172 dcn10_log_hw_state(dc, NULL);
1173
1174 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1175 BREAK_TO_DEBUGGER();
1176 if (dcn10_hw_wa_force_recovery(dc)) {
1177 /*check again*/
1178 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1179 BREAK_TO_DEBUGGER();
1180 }
1181 }
1182 }
1183
1184 /* trigger HW to start disconnect plane from stream on the next vsync */
dcn10_plane_atomic_disconnect(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)1185 void dcn10_plane_atomic_disconnect(struct dc *dc,
1186 struct dc_state *state,
1187 struct pipe_ctx *pipe_ctx)
1188 {
1189 struct dce_hwseq *hws = dc->hwseq;
1190 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1191 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1192 struct mpc *mpc = dc->res_pool->mpc;
1193 struct mpc_tree *mpc_tree_params;
1194 struct mpcc *mpcc_to_remove = NULL;
1195 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1196
1197 mpc_tree_params = &(opp->mpc_tree_params);
1198 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1199
1200 /*Already reset*/
1201 if (mpcc_to_remove == NULL)
1202 return;
1203
1204 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1205 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1206 // so don't wait for MPCC_IDLE in the programming sequence
1207 if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
1208 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1209
1210 dc->optimized_required = true;
1211
1212 if (hubp->funcs->hubp_disconnect)
1213 hubp->funcs->hubp_disconnect(hubp);
1214
1215 if (dc->debug.sanity_checks)
1216 hws->funcs.verify_allow_pstate_change_high(dc);
1217 }
1218
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		/* Gate DPP and HUBP (false = remove power) where supported. */
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		dpp->funcs->dpp_reset(dpp);

		/* Close the power-gate request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Drop the DPP root clock as well, when the hook exists. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1260
1261 /* disable HW used by plane.
1262 * note: cannot disable until disconnect is complete
1263 */
dcn10_plane_atomic_disable(struct dc * dc,struct pipe_ctx * pipe_ctx)1264 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1265 {
1266 struct dce_hwseq *hws = dc->hwseq;
1267 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1268 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1269 int opp_id = hubp->opp_id;
1270
1271 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1272
1273 hubp->funcs->hubp_clk_cntl(hubp, false);
1274
1275 dpp->funcs->dpp_dppclk_control(dpp, false, false);
1276
1277 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1278 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1279 pipe_ctx->stream_res.opp,
1280 false);
1281
1282 hubp->power_gated = true;
1283 dc->optimized_required = false; /* We're powering off, no need to optimize */
1284
1285 hws->funcs.plane_atomic_power_down(dc,
1286 pipe_ctx->plane_res.dpp,
1287 pipe_ctx->plane_res.hubp);
1288
1289 pipe_ctx->stream = NULL;
1290 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1291 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1292 pipe_ctx->top_pipe = NULL;
1293 pipe_ctx->bottom_pipe = NULL;
1294 pipe_ctx->plane_state = NULL;
1295 }
1296
dcn10_disable_plane(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)1297 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1298 {
1299 struct dce_hwseq *hws = dc->hwseq;
1300 DC_LOGGER_INIT(dc->ctx->logger);
1301
1302 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1303 return;
1304
1305 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1306
1307 apply_DEGVIDCN10_253_wa(dc);
1308
1309 DC_LOG_DC("Power down front end %d\n",
1310 pipe_ctx->pipe_idx);
1311 }
1312
/*
 * dcn10_init_pipes - bring every pipe into a known, disabled state at init.
 *
 * Phases: blank any running OTGs, zero DET allocations, reset MPC muxes,
 * disconnect and disable every front end, and finally power-gate DSCs that
 * are not in use.  Pipes carrying a seamless-boot stream are skipped so the
 * firmware-lit display is not disturbed.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Build a minimal pipe context pointing at this front end so
		 * the disconnect/disable helpers below can operate on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1483
/*
 * dcn10_init_hw - one-time hardware bring-up for the display core.
 *
 * Initializes clocks, DCCG, VGA/BIOS takeover, reference clocks, link
 * encoders, pipes, audio, backlight/ABM/DMCU, and clock gating.  When an
 * optimized (DMUB-assisted) init already ran, the redundant steps are
 * skipped via is_optimized_init_done.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* Take over from VGA when the BIOS is not in accelerated mode. */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal frequency
	 * when firmware info is available; fall back to xtalin otherwise.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Read back the stored backlight level from each panel. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl) {
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
			}
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight, user_level);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1623
1624 /* In headless boot cases, DIG may be turned
1625 * on which causes HW/SW discrepancies.
1626 * To avoid this, power down hardware on boot
1627 * if DIG is turned on
1628 */
dcn10_power_down_on_boot(struct dc * dc)1629 void dcn10_power_down_on_boot(struct dc *dc)
1630 {
1631 struct dc_link *edp_links[MAX_NUM_EDP];
1632 struct dc_link *edp_link = NULL;
1633 int edp_num;
1634 int i = 0;
1635
1636 dc_get_edp_links(dc, edp_links, &edp_num);
1637 if (edp_num)
1638 edp_link = edp_links[0];
1639
1640 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1641 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1642 dc->hwseq->funcs.edp_backlight_control &&
1643 dc->hwss.power_down &&
1644 dc->hwss.edp_power_control) {
1645 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1646 dc->hwss.power_down(dc);
1647 dc->hwss.edp_power_control(edp_link, false);
1648 } else {
1649 for (i = 0; i < dc->link_count; i++) {
1650 struct dc_link *link = dc->links[i];
1651
1652 if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1653 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1654 dc->hwss.power_down) {
1655 dc->hwss.power_down(dc);
1656 break;
1657 }
1658
1659 }
1660 }
1661
1662 /*
1663 * Call update_clocks with empty context
1664 * to send DISPLAY_OFF
1665 * Otherwise DISPLAY_OFF may not be asserted
1666 */
1667 if (dc->clk_mgr->funcs->set_low_power_state)
1668 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1669 }
1670
dcn10_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)1671 void dcn10_reset_hw_ctx_wrap(
1672 struct dc *dc,
1673 struct dc_state *context)
1674 {
1675 int i;
1676 struct dce_hwseq *hws = dc->hwseq;
1677
1678 /* Reset Back End*/
1679 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1680 struct pipe_ctx *pipe_ctx_old =
1681 &dc->current_state->res_ctx.pipe_ctx[i];
1682 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1683
1684 if (!pipe_ctx_old->stream)
1685 continue;
1686
1687 if (pipe_ctx_old->top_pipe)
1688 continue;
1689
1690 if (!pipe_ctx->stream ||
1691 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1692 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1693
1694 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1695 if (hws->funcs.enable_stream_gating)
1696 hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1697 if (old_clk)
1698 old_clk->funcs->cs_power_down(old_clk);
1699 }
1700 }
1701 }
1702
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1703 static bool patch_address_for_sbs_tb_stereo(
1704 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1705 {
1706 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1707 bool sec_split = pipe_ctx->top_pipe &&
1708 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1709 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1710 (pipe_ctx->stream->timing.timing_3d_format ==
1711 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1712 pipe_ctx->stream->timing.timing_3d_format ==
1713 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1714 *addr = plane_state->address.grph_stereo.left_addr;
1715 plane_state->address.grph_stereo.left_addr =
1716 plane_state->address.grph_stereo.right_addr;
1717 return true;
1718 } else {
1719 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1720 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1721 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1722 plane_state->address.grph_stereo.right_addr =
1723 plane_state->address.grph_stereo.left_addr;
1724 plane_state->address.grph_stereo.right_meta_addr =
1725 plane_state->address.grph_stereo.left_meta_addr;
1726 }
1727 }
1728 return false;
1729 }
1730
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1731 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1732 {
1733 bool addr_patched = false;
1734 PHYSICAL_ADDRESS_LOC addr;
1735 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1736
1737 if (plane_state == NULL)
1738 return;
1739
1740 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1741
1742 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1743 pipe_ctx->plane_res.hubp,
1744 &plane_state->address,
1745 plane_state->flip_immediate);
1746
1747 plane_state->status.requested_address = plane_state->address;
1748
1749 if (plane_state->flip_immediate)
1750 plane_state->status.current_address = plane_state->address;
1751
1752 if (addr_patched)
1753 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1754 }
1755
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1756 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1757 const struct dc_plane_state *plane_state)
1758 {
1759 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1760 const struct dc_transfer_func *tf = NULL;
1761 bool result = true;
1762
1763 if (dpp_base == NULL)
1764 return false;
1765
1766 if (plane_state->in_transfer_func)
1767 tf = plane_state->in_transfer_func;
1768
1769 if (plane_state->gamma_correction &&
1770 !dpp_base->ctx->dc->debug.always_use_regamma
1771 && !plane_state->gamma_correction->is_identity
1772 && dce_use_lut(plane_state->format))
1773 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1774
1775 if (tf == NULL)
1776 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1777 else if (tf->type == TF_TYPE_PREDEFINED) {
1778 switch (tf->tf) {
1779 case TRANSFER_FUNCTION_SRGB:
1780 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1781 break;
1782 case TRANSFER_FUNCTION_BT709:
1783 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1784 break;
1785 case TRANSFER_FUNCTION_LINEAR:
1786 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1787 break;
1788 case TRANSFER_FUNCTION_PQ:
1789 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1790 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1791 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1792 result = true;
1793 break;
1794 default:
1795 result = false;
1796 break;
1797 }
1798 } else if (tf->type == TF_TYPE_BYPASS) {
1799 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1800 } else {
1801 cm_helper_translate_curve_to_degamma_hw_format(tf,
1802 &dpp_base->degamma_params);
1803 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1804 &dpp_base->degamma_params);
1805 result = true;
1806 }
1807
1808 return result;
1809 }
1810
1811 #define MAX_NUM_HW_POINTS 0x200
1812
log_tf(struct dc_context * ctx,struct dc_transfer_func * tf,uint32_t hw_points_num)1813 static void log_tf(struct dc_context *ctx,
1814 struct dc_transfer_func *tf, uint32_t hw_points_num)
1815 {
1816 // DC_LOG_GAMMA is default logging of all hw points
1817 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1818 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1819 int i = 0;
1820
1821 DC_LOG_GAMMA("Gamma Correction TF");
1822 DC_LOG_ALL_GAMMA("Logging all tf points...");
1823 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1824
1825 for (i = 0; i < hw_points_num; i++) {
1826 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1827 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1828 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1829 }
1830
1831 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1832 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1833 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1834 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1835 }
1836 }
1837
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1838 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1839 const struct dc_stream_state *stream)
1840 {
1841 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1842
1843 if (dpp == NULL)
1844 return false;
1845
1846 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1847
1848 if (stream->out_transfer_func &&
1849 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1850 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1851 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1852
1853 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1854 * update.
1855 */
1856 else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1857 stream->out_transfer_func,
1858 &dpp->regamma_params, false)) {
1859 dpp->funcs->dpp_program_regamma_pwl(
1860 dpp,
1861 &dpp->regamma_params, OPP_REGAMMA_USER);
1862 } else
1863 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1864
1865 if (stream != NULL && stream->ctx != NULL &&
1866 stream->out_transfer_func != NULL) {
1867 log_tf(stream->ctx,
1868 stream->out_transfer_func,
1869 dpp->regamma_params.hw_points_num);
1870 }
1871
1872 return true;
1873 }
1874
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	/* The TG master update lock covers everything on the timing
	 * generator, so only the top pipe of the tree needs to take it.
	 */
	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1899
1900 /**
1901 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1902 *
1903 * Software keepout workaround to prevent cursor update locking from stalling
1904 * out cursor updates indefinitely or from old values from being retained in
1905 * the case where the viewport changes in the same frame as the cursor.
1906 *
1907 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1908 * too close to VUPDATE, then stall out until VUPDATE finishes.
1909 *
1910 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1911 * to avoid the need for this workaround.
1912 *
1913 * @dc: Current DC state
1914 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1915 *
1916 * Return: void
1917 */
delay_cursor_until_vupdate(struct dc * dc,struct pipe_ctx * pipe_ctx)1918 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1919 {
1920 struct dc_stream_state *stream = pipe_ctx->stream;
1921 struct crtc_position position;
1922 uint32_t vupdate_start, vupdate_end;
1923 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1924 unsigned int us_per_line, us_vupdate;
1925
1926 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1927 return;
1928
1929 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1930 return;
1931
1932 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1933 &vupdate_end);
1934
1935 dc->hwss.get_position(&pipe_ctx, 1, &position);
1936 vpos = position.vertical_count;
1937
1938 /* Avoid wraparound calculation issues */
1939 vupdate_start += stream->timing.v_total;
1940 vupdate_end += stream->timing.v_total;
1941 vpos += stream->timing.v_total;
1942
1943 if (vpos <= vupdate_start) {
1944 /* VPOS is in VACTIVE or back porch. */
1945 lines_to_vupdate = vupdate_start - vpos;
1946 } else if (vpos > vupdate_end) {
1947 /* VPOS is in the front porch. */
1948 return;
1949 } else {
1950 /* VPOS is in VUPDATE. */
1951 lines_to_vupdate = 0;
1952 }
1953
1954 /* Calculate time until VUPDATE in microseconds. */
1955 us_per_line =
1956 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1957 us_to_vupdate = lines_to_vupdate * us_per_line;
1958
1959 /* 70 us is a conservative estimate of cursor update time*/
1960 if (us_to_vupdate > 70)
1961 return;
1962
1963 /* Stall out until the cursor update completes. */
1964 if (vupdate_end < vupdate_start)
1965 vupdate_end += stream->timing.v_total;
1966 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1967 udelay(us_to_vupdate + us_vupdate);
1968 }
1969
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* Cursor lock is per MPCC tree, so locking one pipe per stream
	 * is sufficient.
	 */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		/* Route the lock through the DMUB lock manager. */
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, lock,
				&hw_locks, &inst_flags);
	} else {
		struct mpc *mpc = dc->res_pool->mpc;

		mpc->funcs->cursor_lock(mpc, pipe->stream_res.opp->inst, lock);
	}
}
1995
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)1996 static bool wait_for_reset_trigger_to_occur(
1997 struct dc_context *dc_ctx,
1998 struct timing_generator *tg)
1999 {
2000 bool rc = false;
2001
2002 DC_LOGGER_INIT(dc_ctx->logger);
2003
2004 /* To avoid endless loop we wait at most
2005 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2006 const uint32_t frames_to_wait_on_triggered_reset = 10;
2007 int i;
2008
2009 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2010
2011 if (!tg->funcs->is_counter_moving(tg)) {
2012 DC_ERROR("TG counter is not moving!\n");
2013 break;
2014 }
2015
2016 if (tg->funcs->did_triggered_reset_occur(tg)) {
2017 rc = true;
2018 /* usually occurs at i=1 */
2019 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2020 i);
2021 break;
2022 }
2023
2024 /* Wait for one frame. */
2025 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2026 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2027 }
2028
2029 if (false == rc)
2030 DC_ERROR("GSL: Timeout on reset trigger!\n");
2031
2032 return rc;
2033 }
2034
/* Reduce the fraction *numerator / *denominator by dividing out small prime
 * factors.  With checkUint32Bounary set, stop as soon as both values fit in
 * 32 bits and report success; otherwise run the full table and report true.
 */
static uint64_t reduceSizeAndFraction(uint64_t *numerator,
				      uint64_t *denominator,
				      bool checkUint32Bounary)
{
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	const int num_primes = ARRAY_SIZE(prime_numbers);
	const uint64_t max_int32 = 0xffffffff;
	uint64_t num = *numerator;
	uint64_t denom = *denominator;
	/* When no bound check is requested the result is always success. */
	bool within_bound = !checkUint32Bounary;
	int i;

	for (i = 0; i < num_primes; i++) {
		uint32_t num_rem, denom_rem;
		uint64_t num_q, denom_q;

		if (checkUint32Bounary &&
				num <= max_int32 && denom <= max_int32) {
			within_bound = true;
			break;
		}

		/* Divide out this prime as many times as it divides both. */
		do {
			num_q = div_u64_rem(num, prime_numbers[i], &num_rem);
			denom_q = div_u64_rem(denom, prime_numbers[i], &denom_rem);
			if (num_rem == 0 && denom_rem == 0) {
				num = num_q;
				denom = denom_q;
			}
		} while (num_rem == 0 && denom_rem == 0);
	}

	*numerator = num;
	*denominator = denom;
	return within_bound;
}
2084
is_low_refresh_rate(struct pipe_ctx * pipe)2085 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2086 {
2087 uint32_t master_pipe_refresh_rate =
2088 pipe->stream->timing.pix_clk_100hz * 100 /
2089 pipe->stream->timing.h_total /
2090 pipe->stream->timing.v_total;
2091 return master_pipe_refresh_rate <= 30;
2092 }
2093
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2094 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2095 bool account_low_refresh_rate)
2096 {
2097 uint32_t clock_divider = 1;
2098 uint32_t numpipes = 1;
2099
2100 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2101 clock_divider *= 2;
2102
2103 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2104 clock_divider *= 2;
2105
2106 while (pipe->next_odm_pipe) {
2107 pipe = pipe->next_odm_pipe;
2108 numpipes++;
2109 }
2110 clock_divider *= numpipes;
2111
2112 return clock_divider;
2113 }
2114
/* Align the DP DTO phase/modulo of every non-embedded pipe in the group to
 * the embedded panel's pixel clock so the group's vblanks can later be
 * synchronized.  Returns the index of the chosen master pipe, or -1 when no
 * master was selected (no DTO params configured / override unsupported, or
 * allocation failure).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* vblank_alignment_dto_params packs the embedded panel's
		 * timing: h_total in bits [46:32], v_total in bits [62:48],
		 * pixel clock (100Hz units) in bits [31:0].
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read actual hw timing/pclk per pipe and compute
		 * DTO phase/modulo relative to the embedded pipe.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* Embedded pipe is the timing master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: push the computed DTO into each synchronizable
		 * non-embedded pipe and record the resulting pixel clock.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2204
/* Synchronize the vblanks of a group of pipes: align their DP DTOs to a
 * master pipe, then have each slave OTG align its vblank to the master's.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* Temporarily enlarge the DPG vertical size (restored after sync);
	 * bail out entirely if any slave OTG is disabled.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before re-aligning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the original DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2269
/* Synchronize the timing of a group of pipes by resetting every slave OTG
 * from the group's first OTG via the triggered-reset mechanism.  SubVP
 * phantom pipes are excluded throughout.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Temporarily enlarge the DPG vertical size (restored after sync);
	 * bail out entirely if any slave OTG is disabled.
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear sync state before re-synchronizing. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave OTG to reset from the first OTG in the group. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the reset triggers now that the reset has happened. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the original DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2352
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2353 void dcn10_enable_per_frame_crtc_position_reset(
2354 struct dc *dc,
2355 int group_size,
2356 struct pipe_ctx *grouped_pipes[])
2357 {
2358 struct dc_context *dc_ctx = dc->ctx;
2359 int i;
2360
2361 DC_LOGGER_INIT(dc_ctx->logger);
2362
2363 DC_SYNC_INFO("Setting up\n");
2364 for (i = 0; i < group_size; i++)
2365 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2366 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2367 grouped_pipes[i]->stream_res.tg,
2368 0,
2369 &grouped_pipes[i]->stream->triggered_crtc_reset);
2370
2371 DC_SYNC_INFO("Waiting for trigger\n");
2372
2373 for (i = 0; i < group_size; i++)
2374 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2375
2376 DC_SYNC_INFO("Multi-display sync is complete\n");
2377 }
2378
/* Read the MC VM system aperture settings (default physical page and the
 * low/high logical address bounds) from MMHUB registers so they can be
 * mirrored into the HUBP.  The default address register holds a 4KB page
 * number (hence << 12); the low/high registers hold addresses in 256KB
 * units (hence << 18).
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2402
/* Temporary read settings, future will get values from kmd directly */
/* Read VM context 0 page-table settings (PTE base/start/end and the fault
 * default address) from MMHUB registers, translating the PTE base from UMA
 * space into the DCN address space.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset registers hold values in 16MB units (<< 24). */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2447
2448
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2449 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2450 {
2451 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2452 struct vm_system_aperture_param apt = {0};
2453 struct vm_context0_param vm0 = {0};
2454
2455 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2456 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2457
2458 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2459 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2460 }
2461
/* Power up and clock-enable the pipe's plane resources (HUBP/DPP/OPP) so a
 * surface can be programmed, optionally programming PTE/VM settings when GPU
 * VM support is enabled.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Revert the DEGVIDCN10-253 workaround before enabling the plane. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Arm the flip interrupt on the top pipe when the surface wants it. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2500
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2501 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2502 {
2503 int i = 0;
2504 struct dpp_grph_csc_adjustment adjust;
2505 memset(&adjust, 0, sizeof(adjust));
2506 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2507
2508
2509 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2510 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2511 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2512 adjust.temperature_matrix[i] =
2513 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2514 } else if (pipe_ctx->plane_state &&
2515 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2516 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2517 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2518 adjust.temperature_matrix[i] =
2519 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2520 }
2521
2522 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2523 }
2524
2525
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2526 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2527 {
2528 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2529 if (pipe_ctx->top_pipe) {
2530 struct pipe_ctx *top = pipe_ctx->top_pipe;
2531
2532 while (top->top_pipe)
2533 top = top->top_pipe; // Traverse to top pipe_ctx
2534 if (top->plane_state && top->plane_state->layer_index == 0)
2535 return true; // Front MPO plane not hidden
2536 }
2537 }
2538 return false;
2539 }
2540
/* Program the OCSC with the RGB bias terms (matrix[3/7/11]) forced to zero
 * to fix MPO brightness on the rear plane, then restore the caller's matrix
 * so it is unchanged on return.  Assumes all three bias entries held the
 * same value, as matrix[3] is used to restore all of them.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	const uint16_t saved_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;

	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);

	matrix[3] = saved_bias;
	matrix[7] = saved_bias;
	matrix[11] = saved_bias;
}
2554
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment != true) {
		if (dpp->funcs->dpp_set_csc_default != NULL)
			dpp->funcs->dpp_set_csc_default(dpp, colorspace);
		return;
	}

	if (dpp->funcs->dpp_set_csc_adjustment == NULL)
		return;

	/* MPO is broken with RGB colorspaces when OCSC matrix
	 * brightness offset >= 0 on DCN1 due to OCSC before MPC:
	 * blending adds offsets from front + rear to rear plane.
	 *
	 * Fix is to set RGB bias to 0 on rear plane, top plane
	 * black value pixels add offset instead of rear + front.
	 *
	 * matrix[3], [7] and [11] all hold the same offset value.
	 */
	if ((int16_t)matrix[3] > 0 &&
			dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace))
		dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
	else
		dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
}
2586
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2587 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2588 {
2589 struct dc_bias_and_scale bns_params = {0};
2590
2591 // program the input csc
2592 dpp->funcs->dpp_setup(dpp,
2593 plane_state->format,
2594 EXPANSION_MODE_ZERO,
2595 plane_state->input_csc_color_matrix,
2596 plane_state->color_space,
2597 NULL);
2598
2599 //set scale and bias registers
2600 build_prescale_params(&bns_params, plane_state);
2601 if (dpp->funcs->dpp_program_bias_and_scale)
2602 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2603 }
2604
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2605 void dcn10_update_visual_confirm_color(struct dc *dc,
2606 struct pipe_ctx *pipe_ctx,
2607 int mpcc_id)
2608 {
2609 struct mpc *mpc = dc->res_pool->mpc;
2610
2611 if (mpc->funcs->set_bg_color) {
2612 memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2613 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2614 }
2615 }
2616
/* Build the MPCC blend configuration for the plane on this pipe and attach
 * the plane's MPCC to the stream's MPC tree.  On non-full updates only the
 * blend settings are reprogrammed; the tree topology is left untouched.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only applies when there is a bottom pipe to blend with. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			/* Combine per-pixel alpha with the global gain. */
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2694
update_scaler(struct pipe_ctx * pipe_ctx)2695 static void update_scaler(struct pipe_ctx *pipe_ctx)
2696 {
2697 bool per_pixel_alpha =
2698 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2699
2700 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2701 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2702 /* scaler configuration */
2703 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2704 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2705 }
2706
/*
 * Program the HUBP (surface fetch) and DPP (pixel processing) hardware for
 * one pipe according to the plane's update flags. Heavy programming (clock
 * control, VTG selection, DLG/TTU registers, gamut remap, output CSC) is
 * only done on a full update; lighter paths (scaler, viewport, MPCC
 * blending, surface config) run whenever their specific flag is set.
 * The statement order here mirrors the required hardware sequence.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* With a DCCG the per-DPP DTO is programmed directly; otherwise
		 * the shared clk_mgr value is updated to dispclk or dispclk/2.
		 */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		/* Program DLG/TTU/RQ (latency and request) registers. */
		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The active fetch region is the post-scale viewport, not the full
	 * plane surface. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Re-program the cursor whenever it has a valid surface address, since
	 * plane/scaler changes above may have invalidated its placement. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* Unblank last, once the surface is fully programmed. */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2868
/*
 * Blank or unblank the pixel data path on a pipe's timing generator.
 * The blank color is derived from the stream's output color space so a
 * "blanked" screen shows true black for that encoding. When blanking,
 * ABM is disabled first and the TG waits for VBLANK so the transition
 * is not visible mid-frame; when unblanking, ABM is re-enabled after.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank immediately, then restore the stream's ABM level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM first, then blank at the VBLANK boundary. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2912
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2913 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2914 {
2915 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2916 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2917 struct custom_float_format fmt;
2918
2919 fmt.exponenta_bits = 6;
2920 fmt.mantissa_bits = 12;
2921 fmt.sign = true;
2922
2923
2924 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2925 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2926
2927 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2928 pipe_ctx->plane_res.dpp, hw_mult);
2929 }
2930
/*
 * Program one pipe for the new state: global sync / VTG on the top pipe,
 * then HUBP/DPP, HDR multiplier, and transfer functions. Ordering is
 * significant — timing must be set up before the plane is enabled and
 * the hub/dpp are programmed.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced unconditionally
 * below, so callers must guarantee a plane is attached to this pipe —
 * TODO confirm against call sites.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only the top (OTG master) pipe owns timing programming. */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2978
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)2979 void dcn10_wait_for_pending_cleared(struct dc *dc,
2980 struct dc_state *context)
2981 {
2982 struct pipe_ctx *pipe_ctx;
2983 struct timing_generator *tg;
2984 int i;
2985
2986 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2987 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2988 tg = pipe_ctx->stream_res.tg;
2989
2990 /*
2991 * Only wait for top pipe's tg penindg bit
2992 * Also skip if pipe is disabled.
2993 */
2994 if (pipe_ctx->top_pipe ||
2995 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2996 !tg->funcs->is_tg_enabled(tg))
2997 continue;
2998
2999 /*
3000 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3001 * For some reason waiting for OTG_UPDATE_PENDING cleared
3002 * seems to not trigger the update right away, and if we
3003 * lock again before VUPDATE then we don't get a separated
3004 * operation.
3005 */
3006 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3007 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3008 }
3009 }
3010
dcn10_post_unlock_program_front_end(struct dc * dc,struct dc_state * context)3011 void dcn10_post_unlock_program_front_end(
3012 struct dc *dc,
3013 struct dc_state *context)
3014 {
3015 int i;
3016
3017 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3018 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3019
3020 if (!pipe_ctx->top_pipe &&
3021 !pipe_ctx->prev_odm_pipe &&
3022 pipe_ctx->stream) {
3023 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3024
3025 if (context->stream_status[i].plane_count == 0)
3026 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3027 }
3028 }
3029
3030 for (i = 0; i < dc->res_pool->pipe_count; i++)
3031 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3032 dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3033
3034 for (i = 0; i < dc->res_pool->pipe_count; i++)
3035 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3036 dc->hwss.optimize_bandwidth(dc, context);
3037 break;
3038 }
3039
3040 if (dc->hwseq->wa.DEGVIDCN10_254)
3041 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3042 }
3043
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3044 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3045 {
3046 uint8_t i;
3047
3048 for (i = 0; i < context->stream_count; i++) {
3049 if (context->streams[i]->timing.timing_3d_format
3050 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3051 /*
3052 * Disable stutter
3053 */
3054 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3055 break;
3056 }
3057 }
3058 }
3059
/*
 * Raise clocks and program watermarks BEFORE front-end programming so
 * bandwidth is guaranteed while the new state is being applied
 * (update_clocks is called with safe_to_lower=false here; the lowering
 * pass happens later in dcn10_optimize_bandwidth).
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams -> PHY clock can be reported as 0. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* Remember whether watermarks changed so the optimize pass knows
	 * a lowering reprogram is still required. */
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	/* Optionally push overridden watermark ranges to pplib. */
	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3097
/*
 * Lower clocks and reprogram watermarks AFTER front-end programming has
 * completed (update_clocks is called with safe_to_lower=true). This is
 * the counterpart of dcn10_prepare_bandwidth.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams -> PHY clock can be reported as 0. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	/* Optionally push overridden watermark ranges to pplib. */
	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3136
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,struct dc_crtc_timing_adjust adjust)3137 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3138 int num_pipes, struct dc_crtc_timing_adjust adjust)
3139 {
3140 int i = 0;
3141 struct drr_params params = {0};
3142 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3143 unsigned int event_triggers = 0x800;
3144 // Note DRR trigger events are generated regardless of whether num frames met.
3145 unsigned int num_frames = 2;
3146
3147 params.vertical_total_max = adjust.v_total_max;
3148 params.vertical_total_min = adjust.v_total_min;
3149 params.vertical_total_mid = adjust.v_total_mid;
3150 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3151 /* TODO: If multiple pipes are to be supported, you need
3152 * some GSL stuff. Static screen triggers may be programmed differently
3153 * as well.
3154 */
3155 for (i = 0; i < num_pipes; i++) {
3156 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3157 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3158 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3159 pipe_ctx[i]->stream_res.tg, ¶ms);
3160 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3161 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3162 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3163 pipe_ctx[i]->stream_res.tg,
3164 event_triggers, num_frames);
3165 }
3166 }
3167 }
3168
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3169 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3170 int num_pipes,
3171 struct crtc_position *position)
3172 {
3173 int i = 0;
3174
3175 /* TODO: handle pipes > 1
3176 */
3177 for (i = 0; i < num_pipes; i++)
3178 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3179 }
3180
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3181 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3182 int num_pipes, const struct dc_static_screen_params *params)
3183 {
3184 unsigned int i;
3185 unsigned int triggers = 0;
3186
3187 if (params->triggers.surface_update)
3188 triggers |= 0x80;
3189 if (params->triggers.cursor_update)
3190 triggers |= 0x2;
3191 if (params->triggers.force_trigger)
3192 triggers |= 0x1;
3193
3194 for (i = 0; i < num_pipes; i++)
3195 pipe_ctx[i]->stream_res.tg->funcs->
3196 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3197 triggers, params->num_frames);
3198 }
3199
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3200 static void dcn10_config_stereo_parameters(
3201 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3202 {
3203 enum view_3d_format view_format = stream->view_format;
3204 enum dc_timing_3d_format timing_3d_format =\
3205 stream->timing.timing_3d_format;
3206 bool non_stereo_timing = false;
3207
3208 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3209 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3210 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3211 non_stereo_timing = true;
3212
3213 if (non_stereo_timing == false &&
3214 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3215
3216 flags->PROGRAM_STEREO = 1;
3217 flags->PROGRAM_POLARITY = 1;
3218 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3219 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3220 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3221 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3222
3223 if (stream->link && stream->link->ddc) {
3224 enum display_dongle_type dongle = \
3225 stream->link->ddc->dongle_type;
3226
3227 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3228 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3229 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3230 flags->DISABLE_STEREO_DP_SYNC = 1;
3231 }
3232 }
3233 flags->RIGHT_EYE_POLARITY =\
3234 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3235 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3236 flags->FRAME_PACKED = 1;
3237 }
3238
3239 return;
3240 }
3241
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3242 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3243 {
3244 struct crtc_stereo_flags flags = { 0 };
3245 struct dc_stream_state *stream = pipe_ctx->stream;
3246
3247 dcn10_config_stereo_parameters(stream, &flags);
3248
3249 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3250 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3251 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3252 } else {
3253 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3254 }
3255
3256 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3257 pipe_ctx->stream_res.opp,
3258 flags.PROGRAM_STEREO == 1,
3259 &stream->timing);
3260
3261 pipe_ctx->stream_res.tg->funcs->program_stereo(
3262 pipe_ctx->stream_res.tg,
3263 &stream->timing,
3264 &flags);
3265
3266 return;
3267 }
3268
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3269 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3270 {
3271 int i;
3272
3273 for (i = 0; i < res_pool->pipe_count; i++) {
3274 if (res_pool->hubps[i]->inst == mpcc_inst)
3275 return res_pool->hubps[i];
3276 }
3277 ASSERT(false);
3278 return NULL;
3279 }
3280
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3281 void dcn10_wait_for_mpcc_disconnect(
3282 struct dc *dc,
3283 struct resource_pool *res_pool,
3284 struct pipe_ctx *pipe_ctx)
3285 {
3286 struct dce_hwseq *hws = dc->hwseq;
3287 int mpcc_inst;
3288
3289 if (dc->debug.sanity_checks) {
3290 hws->funcs.verify_allow_pstate_change_high(dc);
3291 }
3292
3293 if (!pipe_ctx->stream_res.opp)
3294 return;
3295
3296 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3297 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3298 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3299
3300 if (pipe_ctx->stream_res.tg &&
3301 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3302 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3303 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3304 hubp->funcs->set_blank(hubp, true);
3305 }
3306 }
3307
3308 if (dc->debug.sanity_checks) {
3309 hws->funcs.verify_allow_pstate_change_high(dc);
3310 }
3311
3312 }
3313
/*
 * No-op stand-in for the display power gating hook on ASICs where DC
 * does not control pipe power gating; always reports success. All
 * parameters are ignored.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3322
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3323 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3324 {
3325 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3326 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3327 bool flip_pending;
3328 struct dc *dc = pipe_ctx->stream->ctx->dc;
3329
3330 if (plane_state == NULL)
3331 return;
3332
3333 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3334 pipe_ctx->plane_res.hubp);
3335
3336 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3337
3338 if (!flip_pending)
3339 plane_state->status.current_address = plane_state->status.requested_address;
3340
3341 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3342 tg->funcs->is_stereo_left_eye) {
3343 plane_state->status.is_right_eye =
3344 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3345 }
3346
3347 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3348 struct dce_hwseq *hwseq = dc->hwseq;
3349 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3350 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3351
3352 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3353 struct hubbub *hubbub = dc->res_pool->hubbub;
3354
3355 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3356 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3357 }
3358 }
3359 }
3360
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3361 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3362 {
3363 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3364
3365 /* In DCN, this programming sequence is owned by the hubbub */
3366 hubbub->funcs->update_dchub(hubbub, dh_data);
3367 }
3368
dcn10_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)3369 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3370 {
3371 struct pipe_ctx *test_pipe, *split_pipe;
3372 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3373 struct rect r1 = scl_data->recout, r2, r2_half;
3374 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3375 int cur_layer = pipe_ctx->plane_state->layer_index;
3376
3377 /**
3378 * Disable the cursor if there's another pipe above this with a
3379 * plane that contains this pipe's viewport to prevent double cursor
3380 * and incorrect scaling artifacts.
3381 */
3382 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3383 test_pipe = test_pipe->top_pipe) {
3384 // Skip invisible layer and pipe-split plane on same layer
3385 if (!test_pipe->plane_state ||
3386 !test_pipe->plane_state->visible ||
3387 test_pipe->plane_state->layer_index == cur_layer)
3388 continue;
3389
3390 r2 = test_pipe->plane_res.scl_data.recout;
3391 r2_r = r2.x + r2.width;
3392 r2_b = r2.y + r2.height;
3393 split_pipe = test_pipe;
3394
3395 /**
3396 * There is another half plane on same layer because of
3397 * pipe-split, merge together per same height.
3398 */
3399 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3400 split_pipe = split_pipe->top_pipe)
3401 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3402 r2_half = split_pipe->plane_res.scl_data.recout;
3403 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3404 r2.width = r2.width + r2_half.width;
3405 r2_r = r2.x + r2.width;
3406 break;
3407 }
3408
3409 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3410 return true;
3411 }
3412
3413 return false;
3414 }
3415
dcn10_set_cursor_position(struct pipe_ctx * pipe_ctx)3416 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3417 {
3418 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3419 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3420 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3421 struct dc_cursor_mi_param param = {
3422 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3423 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3424 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3425 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3426 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3427 .rotation = pipe_ctx->plane_state->rotation,
3428 .mirror = pipe_ctx->plane_state->horizontal_mirror,
3429 .stream = pipe_ctx->stream,
3430 };
3431 bool pipe_split_on = false;
3432 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3433 (pipe_ctx->prev_odm_pipe != NULL);
3434
3435 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3436 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3437 int x_pos = pos_cpy.x;
3438 int y_pos = pos_cpy.y;
3439
3440 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3441 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3442 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3443 pipe_split_on = true;
3444 }
3445 }
3446
3447 /**
3448 * DC cursor is stream space, HW cursor is plane space and drawn
3449 * as part of the framebuffer.
3450 *
3451 * Cursor position can't be negative, but hotspot can be used to
3452 * shift cursor out of the plane bounds. Hotspot must be smaller
3453 * than the cursor size.
3454 */
3455
3456 /**
3457 * Translate cursor from stream space to plane space.
3458 *
3459 * If the cursor is scaled then we need to scale the position
3460 * to be in the approximately correct place. We can't do anything
3461 * about the actual size being incorrect, that's a limitation of
3462 * the hardware.
3463 */
3464 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3465 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3466 pipe_ctx->plane_state->dst_rect.width;
3467 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3468 pipe_ctx->plane_state->dst_rect.height;
3469 } else {
3470 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3471 pipe_ctx->plane_state->dst_rect.width;
3472 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3473 pipe_ctx->plane_state->dst_rect.height;
3474 }
3475
3476 /**
3477 * If the cursor's source viewport is clipped then we need to
3478 * translate the cursor to appear in the correct position on
3479 * the screen.
3480 *
3481 * This translation isn't affected by scaling so it needs to be
3482 * done *after* we adjust the position for the scale factor.
3483 *
3484 * This is only done by opt-in for now since there are still
3485 * some usecases like tiled display that might enable the
3486 * cursor on both streams while expecting dc to clip it.
3487 */
3488 if (pos_cpy.translate_by_source) {
3489 x_pos += pipe_ctx->plane_state->src_rect.x;
3490 y_pos += pipe_ctx->plane_state->src_rect.y;
3491 }
3492
3493 /**
3494 * If the position is negative then we need to add to the hotspot
3495 * to shift the cursor outside the plane.
3496 */
3497
3498 if (x_pos < 0) {
3499 pos_cpy.x_hotspot -= x_pos;
3500 x_pos = 0;
3501 }
3502
3503 if (y_pos < 0) {
3504 pos_cpy.y_hotspot -= y_pos;
3505 y_pos = 0;
3506 }
3507
3508 pos_cpy.x = (uint32_t)x_pos;
3509 pos_cpy.y = (uint32_t)y_pos;
3510
3511 if (pipe_ctx->plane_state->address.type
3512 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3513 pos_cpy.enable = false;
3514
3515 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3516 pos_cpy.enable = false;
3517
3518
3519 if (param.rotation == ROTATION_ANGLE_0) {
3520 int viewport_width =
3521 pipe_ctx->plane_res.scl_data.viewport.width;
3522 int viewport_x =
3523 pipe_ctx->plane_res.scl_data.viewport.x;
3524
3525 if (param.mirror) {
3526 if (pipe_split_on || odm_combine_on) {
3527 if (pos_cpy.x >= viewport_width + viewport_x) {
3528 pos_cpy.x = 2 * viewport_width
3529 - pos_cpy.x + 2 * viewport_x;
3530 } else {
3531 uint32_t temp_x = pos_cpy.x;
3532
3533 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3534 if (temp_x >= viewport_x +
3535 (int)hubp->curs_attr.width || pos_cpy.x
3536 <= (int)hubp->curs_attr.width +
3537 pipe_ctx->plane_state->src_rect.x) {
3538 pos_cpy.x = temp_x + viewport_width;
3539 }
3540 }
3541 } else {
3542 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3543 }
3544 }
3545 }
3546 // Swap axis and mirror horizontally
3547 else if (param.rotation == ROTATION_ANGLE_90) {
3548 uint32_t temp_x = pos_cpy.x;
3549
3550 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3551 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3552 pos_cpy.y = temp_x;
3553 }
3554 // Swap axis and mirror vertically
3555 else if (param.rotation == ROTATION_ANGLE_270) {
3556 uint32_t temp_y = pos_cpy.y;
3557 int viewport_height =
3558 pipe_ctx->plane_res.scl_data.viewport.height;
3559 int viewport_y =
3560 pipe_ctx->plane_res.scl_data.viewport.y;
3561
3562 /**
3563 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3564 * For pipe split cases:
3565 * - apply offset of viewport.y to normalize pos_cpy.x
3566 * - calculate the pos_cpy.y as before
3567 * - shift pos_cpy.y back by same offset to get final value
3568 * - since we iterate through both pipes, use the lower
3569 * viewport.y for offset
3570 * For non pipe split cases, use the same calculation for
3571 * pos_cpy.y as the 180 degree rotation case below,
3572 * but use pos_cpy.x as our input because we are rotating
3573 * 270 degrees
3574 */
3575 if (pipe_split_on || odm_combine_on) {
3576 int pos_cpy_x_offset;
3577 int other_pipe_viewport_y;
3578
3579 if (pipe_split_on) {
3580 if (pipe_ctx->bottom_pipe) {
3581 other_pipe_viewport_y =
3582 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3583 } else {
3584 other_pipe_viewport_y =
3585 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3586 }
3587 } else {
3588 if (pipe_ctx->next_odm_pipe) {
3589 other_pipe_viewport_y =
3590 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3591 } else {
3592 other_pipe_viewport_y =
3593 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3594 }
3595 }
3596 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3597 other_pipe_viewport_y : viewport_y;
3598 pos_cpy.x -= pos_cpy_x_offset;
3599 if (pos_cpy.x > viewport_height) {
3600 pos_cpy.x = pos_cpy.x - viewport_height;
3601 pos_cpy.y = viewport_height - pos_cpy.x;
3602 } else {
3603 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3604 }
3605 pos_cpy.y += pos_cpy_x_offset;
3606 } else {
3607 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3608 }
3609 pos_cpy.x = temp_y;
3610 }
3611 // Mirror horizontally and vertically
3612 else if (param.rotation == ROTATION_ANGLE_180) {
3613 int viewport_width =
3614 pipe_ctx->plane_res.scl_data.viewport.width;
3615 int viewport_x =
3616 pipe_ctx->plane_res.scl_data.viewport.x;
3617
3618 if (!param.mirror) {
3619 if (pipe_split_on || odm_combine_on) {
3620 if (pos_cpy.x >= viewport_width + viewport_x) {
3621 pos_cpy.x = 2 * viewport_width
3622 - pos_cpy.x + 2 * viewport_x;
3623 } else {
3624 uint32_t temp_x = pos_cpy.x;
3625
3626 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3627 if (temp_x >= viewport_x +
3628 (int)hubp->curs_attr.width || pos_cpy.x
3629 <= (int)hubp->curs_attr.width +
3630 pipe_ctx->plane_state->src_rect.x) {
3631 pos_cpy.x = 2 * viewport_width - temp_x;
3632 }
3633 }
3634 } else {
3635 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3636 }
3637 }
3638
3639 /**
3640 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3641 * Calculation:
3642 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3643 * pos_cpy.y_new = viewport.y + delta_from_bottom
3644 * Simplify it as:
3645 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3646 */
3647 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3648 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3649 }
3650
3651 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3652 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3653 }
3654
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3655 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3656 {
3657 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3658
3659 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3660 pipe_ctx->plane_res.hubp, attributes);
3661 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3662 pipe_ctx->plane_res.dpp, attributes);
3663 }
3664
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3665 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3666 {
3667 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3668 struct fixed31_32 multiplier;
3669 struct dpp_cursor_attributes opt_attr = { 0 };
3670 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3671 struct custom_float_format fmt;
3672
3673 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3674 return;
3675
3676 fmt.exponenta_bits = 5;
3677 fmt.mantissa_bits = 10;
3678 fmt.sign = true;
3679
3680 if (sdr_white_level > 80) {
3681 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3682 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3683 }
3684
3685 opt_attr.scale = hw_scale;
3686 opt_attr.bias = 0;
3687
3688 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3689 pipe_ctx->plane_res.dpp, &opt_attr);
3690 }
3691
3692 /*
3693 * apply_front_porch_workaround TODO FPGA still need?
3694 *
3695 * This is a workaround for a bug that has existed since R5xx and has not been
3696 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3697 */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3698 static void apply_front_porch_workaround(
3699 struct dc_crtc_timing *timing)
3700 {
3701 if (timing->flags.INTERLACE == 1) {
3702 if (timing->v_front_porch < 2)
3703 timing->v_front_porch = 2;
3704 } else {
3705 if (timing->v_front_porch < 1)
3706 timing->v_front_porch = 1;
3707 }
3708 }
3709
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3710 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3711 {
3712 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3713 struct dc_crtc_timing patched_crtc_timing;
3714 int vesa_sync_start;
3715 int asic_blank_end;
3716 int interlace_factor;
3717
3718 patched_crtc_timing = *dc_crtc_timing;
3719 apply_front_porch_workaround(&patched_crtc_timing);
3720
3721 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3722
3723 vesa_sync_start = patched_crtc_timing.v_addressable +
3724 patched_crtc_timing.v_border_bottom +
3725 patched_crtc_timing.v_front_porch;
3726
3727 asic_blank_end = (patched_crtc_timing.v_total -
3728 vesa_sync_start -
3729 patched_crtc_timing.v_border_top)
3730 * interlace_factor;
3731
3732 return asic_blank_end -
3733 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3734 }
3735
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/*
	 * Wrap the VUPDATE offset into the frame. NOTE(review): the negative
	 * path lands one line short of a true modulo — looks intentional but
	 * worth confirming against the interrupt programming it feeds.
	 */
	if (vupdate_pos >= 0)
		*start_line = vupdate_pos % timing->v_total;
	else
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;

	/* The interrupt window ends two lines later, wrapped at v_total. */
	*end_line = (*start_line + 2) % timing->v_total;
}
3751
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
	case START_V_UPDATE:
		/* Pull the requested offset one line toward zero... */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		/* ...then rebase it on the VUPDATE position and wrap. */
		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		if (vline_pos >= 0)
			*start_line = vline_pos % timing->v_total;
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	case START_V_SYNC:
		/* vsync is line 0 so start_line is just the requested line offset */
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	default:
		ASSERT(0);
		break;
	}
}
3780
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3781 void dcn10_setup_periodic_interrupt(
3782 struct dc *dc,
3783 struct pipe_ctx *pipe_ctx)
3784 {
3785 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3786 uint32_t start_line = 0;
3787 uint32_t end_line = 0;
3788
3789 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3790
3791 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3792 }
3793
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3794 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3795 {
3796 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3797 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3798
3799 if (start_line < 0) {
3800 ASSERT(0);
3801 start_line = 0;
3802 }
3803
3804 if (tg->funcs->setup_vertical_interrupt2)
3805 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3806 }
3807
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3808 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3809 struct dc_link_settings *link_settings)
3810 {
3811 struct encoder_unblank_param params = {0};
3812 struct dc_stream_state *stream = pipe_ctx->stream;
3813 struct dc_link *link = stream->link;
3814 struct dce_hwseq *hws = link->dc->hwseq;
3815
3816 /* only 3 items below are used by unblank */
3817 params.timing = pipe_ctx->stream->timing;
3818
3819 params.link_settings.link_rate = link_settings->link_rate;
3820
3821 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3822 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3823 params.timing.pix_clk_100hz /= 2;
3824 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3825 }
3826
3827 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3828 hws->funcs.edp_backlight_control(link, true);
3829 }
3830 }
3831
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3832 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3833 const uint8_t *custom_sdp_message,
3834 unsigned int sdp_message_size)
3835 {
3836 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3837 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3838 pipe_ctx->stream_res.stream_enc,
3839 custom_sdp_message,
3840 sdp_message_size);
3841 }
3842 }
enum dc_status dcn10_set_clock(struct dc *dc,
			enum dc_clock_type clock_type,
			uint32_t clk_khz,
			uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	/* A clock manager able to report limits is required. */
	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
			context, clock_type, &clock_cfg);

	/* Reject requests outside the reported operating range. */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the request so a later update_clocks call honours it. */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
3881
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3882 void dcn10_get_clock(struct dc *dc,
3883 enum dc_clock_type clock_type,
3884 struct dc_clock_config *clock_cfg)
3885 {
3886 struct dc_state *context = dc->current_state;
3887
3888 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3889 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3890
3891 }
3892
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3893 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3894 {
3895 struct resource_pool *pool = dc->res_pool;
3896 int i;
3897
3898 for (i = 0; i < pool->pipe_count; i++) {
3899 struct hubp *hubp = pool->hubps[i];
3900 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3901
3902 hubp->funcs->hubp_read_state(hubp);
3903
3904 if (!s->blank_en)
3905 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3906 }
3907 }
3908